{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9726110696792603,
"min": 0.9726110696792603,
"max": 2.8746001720428467,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9286.490234375,
"min": 9286.490234375,
"max": 29565.26171875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.419774055480957,
"min": 0.2913528084754944,
"max": 12.419774055480957,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2421.85595703125,
"min": 56.52244567871094,
"max": 2512.24267578125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07068292750673387,
"min": 0.06383855880481218,
"max": 0.07843653215597585,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2827317100269355,
"min": 0.2553542352192487,
"max": 0.3755818272539353,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2087593939082295,
"min": 0.09363252794662236,
"max": 0.2807287061331319,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.835037575632918,
"min": 0.37453011178648943,
"max": 1.4036435306656594,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.84090909090909,
"min": 2.840909090909091,
"max": 24.84090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1093.0,
"min": 125.0,
"max": 1328.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.84090909090909,
"min": 2.840909090909091,
"max": 24.84090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1093.0,
"min": 125.0,
"max": 1328.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678037413",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678037866"
},
"total": 453.531307956,
"count": 1,
"self": 0.38703549899997824,
"children": {
"run_training.setup": {
"total": 0.10937297300000637,
"count": 1,
"self": 0.10937297300000637
},
"TrainerController.start_learning": {
"total": 453.034899484,
"count": 1,
"self": 0.5591009910051525,
"children": {
"TrainerController._reset_env": {
"total": 9.495623754000007,
"count": 1,
"self": 9.495623754000007
},
"TrainerController.advance": {
"total": 442.86288316599484,
"count": 18201,
"self": 0.2903701430022352,
"children": {
"env_step": {
"total": 442.5725130229926,
"count": 18201,
"self": 304.2156956259901,
"children": {
"SubprocessEnvManager._take_step": {
"total": 138.07742201999997,
"count": 18201,
"self": 1.5288501219865225,
"children": {
"TorchPolicy.evaluate": {
"total": 136.54857189801345,
"count": 18201,
"self": 31.20146651802196,
"children": {
"TorchPolicy.sample_actions": {
"total": 105.34710537999149,
"count": 18201,
"self": 105.34710537999149
}
}
}
}
},
"workers": {
"total": 0.2793953770025155,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 451.40888098399654,
"count": 18201,
"is_parallel": true,
"self": 216.40204442400403,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005459213000051477,
"count": 1,
"is_parallel": true,
"self": 0.004158567000104085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013006459999473918,
"count": 10,
"is_parallel": true,
"self": 0.0013006459999473918
}
}
},
"UnityEnvironment.step": {
"total": 0.032797490999996626,
"count": 1,
"is_parallel": true,
"self": 0.0005615070000430933,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002801079999699141,
"count": 1,
"is_parallel": true,
"self": 0.0002801079999699141
},
"communicator.exchange": {
"total": 0.030190657999980886,
"count": 1,
"is_parallel": true,
"self": 0.030190657999980886
},
"steps_from_proto": {
"total": 0.0017652180000027329,
"count": 1,
"is_parallel": true,
"self": 0.0004065290000312416,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013586889999714913,
"count": 10,
"is_parallel": true,
"self": 0.0013586889999714913
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 235.0068365599925,
"count": 18200,
"is_parallel": true,
"self": 9.661429499995563,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.140777702995308,
"count": 18200,
"is_parallel": true,
"self": 5.140777702995308
},
"communicator.exchange": {
"total": 189.7984611590037,
"count": 18200,
"is_parallel": true,
"self": 189.7984611590037
},
"steps_from_proto": {
"total": 30.406168197997943,
"count": 18200,
"is_parallel": true,
"self": 6.7393100140326965,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.666858183965246,
"count": 182000,
"is_parallel": true,
"self": 23.666858183965246
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011271599998963211,
"count": 1,
"self": 0.00011271599998963211,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 439.6802348609966,
"count": 401073,
"is_parallel": true,
"self": 9.789929579001353,
"children": {
"process_trajectory": {
"total": 253.49455183399402,
"count": 401073,
"is_parallel": true,
"self": 252.83737233599402,
"children": {
"RLTrainer._checkpoint": {
"total": 0.657179498000005,
"count": 4,
"is_parallel": true,
"self": 0.657179498000005
}
}
},
"_update_policy": {
"total": 176.39575344800124,
"count": 90,
"is_parallel": true,
"self": 59.74407983800239,
"children": {
"TorchPPOOptimizer.update": {
"total": 116.65167360999885,
"count": 4584,
"is_parallel": true,
"self": 116.65167360999885
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11717885699999897,
"count": 1,
"self": 0.0009643710000091232,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11621448599998985,
"count": 1,
"self": 0.11621448599998985
}
}
}
}
}
}