{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0732603073120117,
"min": 1.0732603073120117,
"max": 2.8673605918884277,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10282.9072265625,
"min": 10282.9072265625,
"max": 29333.09765625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.764761924743652,
"min": 0.22299404442310333,
"max": 11.764761924743652,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2294.128662109375,
"min": 43.26084518432617,
"max": 2366.134765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06701173867166325,
"min": 0.06435791394261267,
"max": 0.07276825311351322,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.268046954686653,
"min": 0.2594403299604606,
"max": 0.3516860720707222,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2031152889716859,
"min": 0.12648915066885963,
"max": 0.27281542855653756,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8124611558867436,
"min": 0.5059566026754385,
"max": 1.3467586931644702,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.477272727272727,
"min": 2.9545454545454546,
"max": 23.477272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1033.0,
"min": 130.0,
"max": 1272.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.477272727272727,
"min": 2.9545454545454546,
"max": 23.477272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1033.0,
"min": 130.0,
"max": 1272.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690667749",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690668225"
},
"total": 476.10949766300007,
"count": 1,
"self": 0.43540280300004497,
"children": {
"run_training.setup": {
"total": 0.03617967200000294,
"count": 1,
"self": 0.03617967200000294
},
"TrainerController.start_learning": {
"total": 475.637915188,
"count": 1,
"self": 0.5561777380112289,
"children": {
"TrainerController._reset_env": {
"total": 6.724936955000032,
"count": 1,
"self": 6.724936955000032
},
"TrainerController.advance": {
"total": 468.2075254389887,
"count": 18204,
"self": 0.2701097929851244,
"children": {
"env_step": {
"total": 467.93741564600356,
"count": 18204,
"self": 338.216636814997,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.44077087800355,
"count": 18204,
"self": 1.893805489998897,
"children": {
"TorchPolicy.evaluate": {
"total": 127.54696538800465,
"count": 18204,
"self": 127.54696538800465
}
}
},
"workers": {
"total": 0.2800079530030075,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 474.09771140800126,
"count": 18204,
"is_parallel": true,
"self": 226.42490357900755,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006307617000004484,
"count": 1,
"is_parallel": true,
"self": 0.00469923399992922,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016083830000752641,
"count": 10,
"is_parallel": true,
"self": 0.0016083830000752641
}
}
},
"UnityEnvironment.step": {
"total": 0.0939640390000136,
"count": 1,
"is_parallel": true,
"self": 0.0006714200000033088,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004210259999695154,
"count": 1,
"is_parallel": true,
"self": 0.0004210259999695154
},
"communicator.exchange": {
"total": 0.09063362300003064,
"count": 1,
"is_parallel": true,
"self": 0.09063362300003064
},
"steps_from_proto": {
"total": 0.0022379700000101366,
"count": 1,
"is_parallel": true,
"self": 0.000441203000036694,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017967669999734426,
"count": 10,
"is_parallel": true,
"self": 0.0017967669999734426
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 247.6728078289937,
"count": 18203,
"is_parallel": true,
"self": 10.727878885009147,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.234757822000233,
"count": 18203,
"is_parallel": true,
"self": 5.234757822000233
},
"communicator.exchange": {
"total": 196.19607679499478,
"count": 18203,
"is_parallel": true,
"self": 196.19607679499478
},
"steps_from_proto": {
"total": 35.51409432698955,
"count": 18203,
"is_parallel": true,
"self": 6.539636991985503,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.974457335004047,
"count": 182030,
"is_parallel": true,
"self": 28.974457335004047
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015610299999480048,
"count": 1,
"self": 0.00015610299999480048,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 464.532719039993,
"count": 431243,
"is_parallel": true,
"self": 10.164241009993248,
"children": {
"process_trajectory": {
"total": 255.22730520300036,
"count": 431243,
"is_parallel": true,
"self": 253.9664912430004,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2608139599999504,
"count": 4,
"is_parallel": true,
"self": 1.2608139599999504
}
}
},
"_update_policy": {
"total": 199.14117282699942,
"count": 90,
"is_parallel": true,
"self": 75.8056922760012,
"children": {
"TorchPPOOptimizer.update": {
"total": 123.33548055099823,
"count": 4587,
"is_parallel": true,
"self": 123.33548055099823
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14911895300008382,
"count": 1,
"self": 0.0009068960001741289,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1482120569999097,
"count": 1,
"self": 0.1482120569999097
}
}
}
}
}
}
}