{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9800288081169128,
"min": 0.9508116841316223,
"max": 2.7789478302001953,
"count": 12
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 24277.2734375,
"min": 23679.013671875,
"max": 70276.8125,
"count": 12
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 24968.0,
"max": 299968.0,
"count": 12
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 24968.0,
"max": 299968.0,
"count": 12
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.495877265930176,
"min": 0.9311500191688538,
"max": 11.639033317565918,
"count": 12
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 4276.46630859375,
"min": 345.4566650390625,
"max": 4434.4716796875,
"count": 12
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 12
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 24079.0,
"min": 24079.0,
"max": 26268.0,
"count": 12
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.031292565853977015,
"min": 0.028887493386829385,
"max": 0.036069306169255556,
"count": 12
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.34421822439374716,
"min": 0.3177624272551232,
"max": 0.3967623678618111,
"count": 12
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22290277006951242,
"min": 0.16587670311105976,
"max": 0.32108023317737705,
"count": 12
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 2.4519304707646365,
"min": 1.8246437342216573,
"max": 3.852962798128525,
"count": 12
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.931247549575757e-06,
"min": 7.931247549575757e-06,
"max": 0.00019125867103733337,
"count": 12
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.724372304533333e-05,
"min": 8.724372304533333e-05,
"max": 0.002103845381410667,
"count": 12
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10396557575757576,
"min": 0.10396557575757576,
"max": 0.19562933333333338,
"count": 12
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 1.1436213333333334,
"min": 1.1436213333333334,
"max": 2.151922666666667,
"count": 12
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00020788223030303034,
"min": 0.00020788223030303034,
"max": 0.004781903733333334,
"count": 12
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0022867045333333337,
"min": 0.0022867045333333337,
"max": 0.05260094106666667,
"count": 12
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.672131147540984,
"min": 4.090909090909091,
"max": 22.672131147540984,
"count": 12
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 2766.0,
"min": 495.0,
"max": 2962.0,
"count": 12
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.672131147540984,
"min": 4.090909090909091,
"max": 22.672131147540984,
"count": 12
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 2766.0,
"min": 495.0,
"max": 2962.0,
"count": 12
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 12
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 12
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692113124",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692113857"
},
"total": 732.3507480830001,
"count": 1,
"self": 0.43636400900004446,
"children": {
"run_training.setup": {
"total": 0.04889503100002912,
"count": 1,
"self": 0.04889503100002912
},
"TrainerController.start_learning": {
"total": 731.865489043,
"count": 1,
"self": 0.9532104189856909,
"children": {
"TrainerController._reset_env": {
"total": 4.3442253489999985,
"count": 1,
"self": 4.3442253489999985
},
"TrainerController.advance": {
"total": 726.3049707020141,
"count": 27334,
"self": 0.49745583401295335,
"children": {
"env_step": {
"total": 725.8075148680011,
"count": 27334,
"self": 496.18179559901273,
"children": {
"SubprocessEnvManager._take_step": {
"total": 229.1289226309716,
"count": 27334,
"self": 3.2747361989690944,
"children": {
"TorchPolicy.evaluate": {
"total": 225.8541864320025,
"count": 27334,
"self": 225.8541864320025
}
}
},
"workers": {
"total": 0.4967966380168036,
"count": 27334,
"self": 0.0,
"children": {
"worker_root": {
"total": 728.8771389570077,
"count": 27334,
"is_parallel": true,
"self": 315.3310434219886,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005533331999913571,
"count": 1,
"is_parallel": true,
"self": 0.0037747590004073572,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017585729995062138,
"count": 10,
"is_parallel": true,
"self": 0.0017585729995062138
}
}
},
"UnityEnvironment.step": {
"total": 0.040218830000185335,
"count": 1,
"is_parallel": true,
"self": 0.0006852770000023156,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00030996899999991,
"count": 1,
"is_parallel": true,
"self": 0.00030996899999991
},
"communicator.exchange": {
"total": 0.033822617000168975,
"count": 1,
"is_parallel": true,
"self": 0.033822617000168975
},
"steps_from_proto": {
"total": 0.005400967000014134,
"count": 1,
"is_parallel": true,
"self": 0.0034051300003739016,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019958369996402325,
"count": 10,
"is_parallel": true,
"self": 0.0019958369996402325
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 413.54609553501905,
"count": 27333,
"is_parallel": true,
"self": 17.41899566299412,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.711704566012031,
"count": 27333,
"is_parallel": true,
"self": 8.711704566012031
},
"communicator.exchange": {
"total": 328.3310167800007,
"count": 27333,
"is_parallel": true,
"self": 328.3310167800007
},
"steps_from_proto": {
"total": 59.08437852601219,
"count": 27333,
"is_parallel": true,
"self": 11.080037703987728,
"children": {
"_process_rank_one_or_two_observation": {
"total": 48.00434082202446,
"count": 273330,
"is_parallel": true,
"self": 48.00434082202446
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012770500006809016,
"count": 1,
"self": 0.00012770500006809016,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 719.1369516098957,
"count": 836268,
"is_parallel": true,
"self": 19.445101581917015,
"children": {
"process_trajectory": {
"total": 463.00115684297884,
"count": 836268,
"is_parallel": true,
"self": 460.5215199619786,
"children": {
"RLTrainer._checkpoint": {
"total": 2.4796368810002605,
"count": 6,
"is_parallel": true,
"self": 2.4796368810002605
}
}
},
"_update_policy": {
"total": 236.69069318499987,
"count": 136,
"is_parallel": true,
"self": 134.3734167989976,
"children": {
"TorchPPOOptimizer.update": {
"total": 102.31727638600228,
"count": 1632,
"is_parallel": true,
"self": 102.31727638600228
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.262954868000179,
"count": 1,
"self": 0.0011459140000624757,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2618089540001165,
"count": 1,
"self": 0.2618089540001165
}
}
}
}
}
}
}