{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.47769731283187866,
"min": 0.47254082560539246,
"max": 2.8571043014526367,
"count": 200
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4918.37158203125,
"min": 4578.27294921875,
"max": 29165.3203125,
"count": 200
},
"SnowballTarget.Step.mean": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Step.sum": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.342301368713379,
"min": 0.5176308155059814,
"max": 14.484357833862305,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2940.171875,
"min": 100.42037963867188,
"max": 2962.50634765625,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0663246093333845,
"min": 0.05824315849588625,
"max": 0.08036968270579226,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3316230466669225,
"min": 0.232972633983545,
"max": 0.3787560095113483,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15127921639119876,
"min": 0.1328227692374996,
"max": 0.31170841240707564,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7563960819559938,
"min": 0.5312910769499984,
"max": 1.4793011471629143,
"count": 200
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032997656000039e-07,
"min": 7.032997656000039e-07,
"max": 0.00029918820027059994,
"count": 200
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.5164988280000196e-06,
"min": 3.5164988280000196e-06,
"max": 0.0014885160038279998,
"count": 200
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10023440000000002,
"min": 0.10023440000000002,
"max": 0.1997294,
"count": 200
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5011720000000001,
"min": 0.4029176,
"max": 0.996172,
"count": 200
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.1696560000000067e-05,
"min": 2.1696560000000067e-05,
"max": 0.004986497059999999,
"count": 200
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00010848280000000034,
"min": 0.00010848280000000034,
"max": 0.024808982800000004,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.163636363636364,
"min": 3.7954545454545454,
"max": 28.727272727272727,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1549.0,
"min": 167.0,
"max": 1569.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.163636363636364,
"min": 3.7954545454545454,
"max": 28.727272727272727,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1549.0,
"min": 167.0,
"max": 1569.0,
"count": 200
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741191939",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741196155"
},
"total": 4215.974285075001,
"count": 1,
"self": 0.5367148030018143,
"children": {
"run_training.setup": {
"total": 0.024830410000049596,
"count": 1,
"self": 0.024830410000049596
},
"TrainerController.start_learning": {
"total": 4215.412739861999,
"count": 1,
"self": 3.385256657136779,
"children": {
"TrainerController._reset_env": {
"total": 3.275967374999709,
"count": 1,
"self": 3.275967374999709
},
"TrainerController.advance": {
"total": 4208.662899835863,
"count": 181864,
"self": 3.5929129956402903,
"children": {
"env_step": {
"total": 2964.321706550058,
"count": 181864,
"self": 2262.4544916791338,
"children": {
"SubprocessEnvManager._take_step": {
"total": 699.83378167916,
"count": 181864,
"self": 12.34118877235369,
"children": {
"TorchPolicy.evaluate": {
"total": 687.4925929068063,
"count": 181864,
"self": 687.4925929068063
}
}
},
"workers": {
"total": 2.0334331917642885,
"count": 181864,
"self": 0.0,
"children": {
"worker_root": {
"total": 4203.174156203948,
"count": 181864,
"is_parallel": true,
"self": 2214.7898374430074,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006570600000031845,
"count": 1,
"is_parallel": true,
"self": 0.0049882190000971605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001582380999934685,
"count": 10,
"is_parallel": true,
"self": 0.001582380999934685
}
}
},
"UnityEnvironment.step": {
"total": 0.03880617799995889,
"count": 1,
"is_parallel": true,
"self": 0.000575290999677236,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039038200020513614,
"count": 1,
"is_parallel": true,
"self": 0.00039038200020513614
},
"communicator.exchange": {
"total": 0.03602345099989179,
"count": 1,
"is_parallel": true,
"self": 0.03602345099989179
},
"steps_from_proto": {
"total": 0.0018170540001847257,
"count": 1,
"is_parallel": true,
"self": 0.0003507580008772493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014662959993074764,
"count": 10,
"is_parallel": true,
"self": 0.0014662959993074764
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1988.3843187609405,
"count": 181863,
"is_parallel": true,
"self": 95.09731899815233,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 53.16440353499593,
"count": 181863,
"is_parallel": true,
"self": 53.16440353499593
},
"communicator.exchange": {
"total": 1534.9840766239145,
"count": 181863,
"is_parallel": true,
"self": 1534.9840766239145
},
"steps_from_proto": {
"total": 305.13851960387774,
"count": 181863,
"is_parallel": true,
"self": 54.046394857766245,
"children": {
"_process_rank_one_or_two_observation": {
"total": 251.0921247461115,
"count": 1818630,
"is_parallel": true,
"self": 251.0921247461115
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1240.748280290164,
"count": 181864,
"self": 4.087213294227695,
"children": {
"process_trajectory": {
"total": 264.31764835694275,
"count": 181864,
"self": 260.21416006594427,
"children": {
"RLTrainer._checkpoint": {
"total": 4.1034882909984844,
"count": 40,
"self": 4.1034882909984844
}
}
},
"_update_policy": {
"total": 972.3434186389936,
"count": 909,
"self": 389.4461087280356,
"children": {
"TorchPPOOptimizer.update": {
"total": 582.897309910958,
"count": 46356,
"self": 582.897309910958
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.369997885893099e-07,
"count": 1,
"self": 8.369997885893099e-07
},
"TrainerController._save_models": {
"total": 0.08861515700027667,
"count": 1,
"self": 0.0009474660009800573,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08766769099929661,
"count": 1,
"self": 0.08766769099929661
}
}
}
}
}
}
}