{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0430805683135986,
"min": 1.0430805683135986,
"max": 2.870999574661255,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9982.28125,
"min": 9982.28125,
"max": 29559.8125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.380651473999023,
"min": 0.41055312752723694,
"max": 12.45952320098877,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2414.22705078125,
"min": 79.64730834960938,
"max": 2531.13232421875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06655441671746837,
"min": 0.05881946563963806,
"max": 0.07534095324945692,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26621766686987347,
"min": 0.2554512399057036,
"max": 0.37670476624728455,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.23552296960762903,
"min": 0.12667855343954892,
"max": 0.3327737209259295,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9420918784305161,
"min": 0.5067142137581957,
"max": 1.6638686046296476,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.65909090909091,
"min": 3.340909090909091,
"max": 24.727272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1085.0,
"min": 147.0,
"max": 1360.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.65909090909091,
"min": 3.340909090909091,
"max": 24.727272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1085.0,
"min": 147.0,
"max": 1360.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700122939",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700123402"
},
"total": 463.152802091,
"count": 1,
"self": 0.7294393010000704,
"children": {
"run_training.setup": {
"total": 0.04992136999999275,
"count": 1,
"self": 0.04992136999999275
},
"TrainerController.start_learning": {
"total": 462.37344141999995,
"count": 1,
"self": 0.5325061319978772,
"children": {
"TrainerController._reset_env": {
"total": 9.011505309999961,
"count": 1,
"self": 9.011505309999961
},
"TrainerController.advance": {
"total": 452.66486456300214,
"count": 18206,
"self": 0.24500317900367463,
"children": {
"env_step": {
"total": 452.41986138399847,
"count": 18206,
"self": 311.28007971699816,
"children": {
"SubprocessEnvManager._take_step": {
"total": 140.87599382699972,
"count": 18206,
"self": 1.40463041099855,
"children": {
"TorchPolicy.evaluate": {
"total": 139.47136341600117,
"count": 18206,
"self": 139.47136341600117
}
}
},
"workers": {
"total": 0.26378784000058886,
"count": 18206,
"self": 0.0,
"children": {
"worker_root": {
"total": 461.03409829899095,
"count": 18206,
"is_parallel": true,
"self": 225.85996375799124,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005943199000000732,
"count": 1,
"is_parallel": true,
"self": 0.004471827999964262,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00147137100003647,
"count": 10,
"is_parallel": true,
"self": 0.00147137100003647
}
}
},
"UnityEnvironment.step": {
"total": 0.0346979690000353,
"count": 1,
"is_parallel": true,
"self": 0.0006267290000323555,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005489540000098714,
"count": 1,
"is_parallel": true,
"self": 0.0005489540000098714
},
"communicator.exchange": {
"total": 0.03169036799999958,
"count": 1,
"is_parallel": true,
"self": 0.03169036799999958
},
"steps_from_proto": {
"total": 0.0018319179999934931,
"count": 1,
"is_parallel": true,
"self": 0.00035018399995578875,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014817340000377044,
"count": 10,
"is_parallel": true,
"self": 0.0014817340000377044
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 235.17413454099972,
"count": 18205,
"is_parallel": true,
"self": 10.43471590500991,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.331044070992789,
"count": 18205,
"is_parallel": true,
"self": 5.331044070992789
},
"communicator.exchange": {
"total": 186.61918964600352,
"count": 18205,
"is_parallel": true,
"self": 186.61918964600352
},
"steps_from_proto": {
"total": 32.7891849189935,
"count": 18205,
"is_parallel": true,
"self": 6.116570912986276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.672614006007223,
"count": 182050,
"is_parallel": true,
"self": 26.672614006007223
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016424099999312602,
"count": 1,
"self": 0.00016424099999312602,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 448.9533564960385,
"count": 475443,
"is_parallel": true,
"self": 9.69019578204228,
"children": {
"process_trajectory": {
"total": 253.0326083349958,
"count": 475443,
"is_parallel": true,
"self": 252.02077165999572,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0118366750000973,
"count": 4,
"is_parallel": true,
"self": 1.0118366750000973
}
}
},
"_update_policy": {
"total": 186.23055237900041,
"count": 90,
"is_parallel": true,
"self": 61.50642759900086,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.72412477999956,
"count": 4584,
"is_parallel": true,
"self": 124.72412477999956
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1644011739999769,
"count": 1,
"self": 0.0011989229999471718,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16320225100002972,
"count": 1,
"self": 0.16320225100002972
}
}
}
}
}
}
}