{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8797261118888855,
"min": 0.8797261118888855,
"max": 2.859004259109497,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8399.625,
"min": 8399.625,
"max": 29279.0625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.05352783203125,
"min": 0.3830348253250122,
"max": 13.05352783203125,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2545.43798828125,
"min": 74.30875396728516,
"max": 2641.55859375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06880697645729192,
"min": 0.060467214172925145,
"max": 0.07504284206786942,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2752279058291677,
"min": 0.26261239447037454,
"max": 0.35991682233597044,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2006659236024408,
"min": 0.12339371829248015,
"max": 0.2910311992846283,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8026636944097632,
"min": 0.4935748731699206,
"max": 1.4551559964231415,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.227272727272727,
"min": 3.5454545454545454,
"max": 26.227272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1154.0,
"min": 156.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.227272727272727,
"min": 3.5454545454545454,
"max": 26.227272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1154.0,
"min": 156.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681564658",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681565188"
},
"total": 529.428303888,
"count": 1,
"self": 0.4316103169998087,
"children": {
"run_training.setup": {
"total": 0.12238719800006947,
"count": 1,
"self": 0.12238719800006947
},
"TrainerController.start_learning": {
"total": 528.8743063730001,
"count": 1,
"self": 0.7143769529911879,
"children": {
"TrainerController._reset_env": {
"total": 4.271495100000038,
"count": 1,
"self": 4.271495100000038
},
"TrainerController.advance": {
"total": 523.738016424009,
"count": 18201,
"self": 0.3942450540101845,
"children": {
"env_step": {
"total": 523.3437713699988,
"count": 18201,
"self": 383.060823922974,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.9320004900203,
"count": 18201,
"self": 2.361341631012124,
"children": {
"TorchPolicy.evaluate": {
"total": 137.57065885900818,
"count": 18201,
"self": 137.57065885900818
}
}
},
"workers": {
"total": 0.350946957004453,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 526.9204211319945,
"count": 18201,
"is_parallel": true,
"self": 234.29450520499643,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005316601000004084,
"count": 1,
"is_parallel": true,
"self": 0.0036731129998770484,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016434880001270358,
"count": 10,
"is_parallel": true,
"self": 0.0016434880001270358
}
}
},
"UnityEnvironment.step": {
"total": 0.0651889549999396,
"count": 1,
"is_parallel": true,
"self": 0.0006306619999350005,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004519359999903827,
"count": 1,
"is_parallel": true,
"self": 0.0004519359999903827
},
"communicator.exchange": {
"total": 0.06001768400005858,
"count": 1,
"is_parallel": true,
"self": 0.06001768400005858
},
"steps_from_proto": {
"total": 0.004088672999955634,
"count": 1,
"is_parallel": true,
"self": 0.0004113799998322065,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003677293000123427,
"count": 10,
"is_parallel": true,
"self": 0.003677293000123427
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 292.6259159269981,
"count": 18200,
"is_parallel": true,
"self": 11.652416725986768,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.152190812996196,
"count": 18200,
"is_parallel": true,
"self": 6.152190812996196
},
"communicator.exchange": {
"total": 238.14377475800916,
"count": 18200,
"is_parallel": true,
"self": 238.14377475800916
},
"steps_from_proto": {
"total": 36.677533630005996,
"count": 18200,
"is_parallel": true,
"self": 7.749188212041759,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.928345417964238,
"count": 182000,
"is_parallel": true,
"self": 28.928345417964238
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011116799987576087,
"count": 1,
"self": 0.00011116799987576087,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 519.4651145799562,
"count": 497701,
"is_parallel": true,
"self": 12.375785295977948,
"children": {
"process_trajectory": {
"total": 292.6979953669787,
"count": 497701,
"is_parallel": true,
"self": 291.52879556497885,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1691998019998664,
"count": 4,
"is_parallel": true,
"self": 1.1691998019998664
}
}
},
"_update_policy": {
"total": 214.39133391699954,
"count": 90,
"is_parallel": true,
"self": 75.87017831699177,
"children": {
"TorchPPOOptimizer.update": {
"total": 138.52115560000777,
"count": 4587,
"is_parallel": true,
"self": 138.52115560000777
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1503067280000323,
"count": 1,
"self": 0.0009818880000693753,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14932483999996293,
"count": 1,
"self": 0.14932483999996293
}
}
}
}
}
}
}