{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.038193702697754,
"min": 1.030350923538208,
"max": 2.8613169193267822,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9935.513671875,
"min": 9935.513671875,
"max": 29334.220703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.328165054321289,
"min": 0.37729376554489136,
"max": 12.328165054321289,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2403.9921875,
"min": 73.19499206542969,
"max": 2487.3798828125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06662542372371308,
"min": 0.06255266767866309,
"max": 0.07364902402853249,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2665016948948523,
"min": 0.25021067071465236,
"max": 0.3600239780025018,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21088498173391118,
"min": 0.11551465782423631,
"max": 0.275950959909196,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8435399269356447,
"min": 0.46205863129694524,
"max": 1.37975479954598,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.5,
"min": 3.1363636363636362,
"max": 24.5,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1078.0,
"min": 138.0,
"max": 1335.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.5,
"min": 3.1363636363636362,
"max": 24.5,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1078.0,
"min": 138.0,
"max": 1335.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684980127",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684980586"
},
"total": 459.224187163,
"count": 1,
"self": 0.7930568740000581,
"children": {
"run_training.setup": {
"total": 0.042702440999960345,
"count": 1,
"self": 0.042702440999960345
},
"TrainerController.start_learning": {
"total": 458.388427848,
"count": 1,
"self": 0.5340189850081742,
"children": {
"TrainerController._reset_env": {
"total": 3.851602971000034,
"count": 1,
"self": 3.851602971000034
},
"TrainerController.advance": {
"total": 453.78148916499174,
"count": 18210,
"self": 0.2589416989995925,
"children": {
"env_step": {
"total": 453.52254746599215,
"count": 18210,
"self": 332.217590197989,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.05866190100267,
"count": 18210,
"self": 1.6407698499895105,
"children": {
"TorchPolicy.evaluate": {
"total": 119.41789205101315,
"count": 18210,
"self": 119.41789205101315
}
}
},
"workers": {
"total": 0.24629536700047083,
"count": 18210,
"self": 0.0,
"children": {
"worker_root": {
"total": 456.8339704130011,
"count": 18210,
"is_parallel": true,
"self": 215.54992536099383,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005236609000007775,
"count": 1,
"is_parallel": true,
"self": 0.003811712000128864,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014248969998789107,
"count": 10,
"is_parallel": true,
"self": 0.0014248969998789107
}
}
},
"UnityEnvironment.step": {
"total": 0.035692079000000376,
"count": 1,
"is_parallel": true,
"self": 0.0006089469999324137,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032056600002761115,
"count": 1,
"is_parallel": true,
"self": 0.00032056600002761115
},
"communicator.exchange": {
"total": 0.032587495000029776,
"count": 1,
"is_parallel": true,
"self": 0.032587495000029776
},
"steps_from_proto": {
"total": 0.0021750710000105755,
"count": 1,
"is_parallel": true,
"self": 0.0003849539998554974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017901170001550781,
"count": 10,
"is_parallel": true,
"self": 0.0017901170001550781
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 241.2840450520073,
"count": 18209,
"is_parallel": true,
"self": 9.51252943400948,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.100715559998321,
"count": 18209,
"is_parallel": true,
"self": 5.100715559998321
},
"communicator.exchange": {
"total": 194.34959157000242,
"count": 18209,
"is_parallel": true,
"self": 194.34959157000242
},
"steps_from_proto": {
"total": 32.32120848799707,
"count": 18209,
"is_parallel": true,
"self": 6.112239286994509,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.20896920100256,
"count": 182090,
"is_parallel": true,
"self": 26.20896920100256
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014046800004052784,
"count": 1,
"self": 0.00014046800004052784,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 450.277419362983,
"count": 429479,
"is_parallel": true,
"self": 9.714214477942278,
"children": {
"process_trajectory": {
"total": 245.13627762804026,
"count": 429479,
"is_parallel": true,
"self": 243.50793832804027,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6283392999999933,
"count": 4,
"is_parallel": true,
"self": 1.6283392999999933
}
}
},
"_update_policy": {
"total": 195.42692725700044,
"count": 90,
"is_parallel": true,
"self": 76.11254794399963,
"children": {
"TorchPPOOptimizer.update": {
"total": 119.31437931300081,
"count": 4587,
"is_parallel": true,
"self": 119.31437931300081
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22117625900000348,
"count": 1,
"self": 0.0011386469999479232,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22003761200005556,
"count": 1,
"self": 0.22003761200005556
}
}
}
}
}
}
}