{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9605424404144287,
"min": 0.9605424404144287,
"max": 2.8526902198791504,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9181.8251953125,
"min": 9181.8251953125,
"max": 29245.78125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 4.587767124176025,
"min": 0.30893564224243164,
"max": 4.587767124176025,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 894.6146240234375,
"min": 59.93351745605469,
"max": 929.6832275390625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06519283968946718,
"min": 0.0616772378448171,
"max": 0.07197350874458767,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2607713587578687,
"min": 0.2467089513792684,
"max": 0.3598675437229384,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.13959977416431202,
"min": 0.09906428751316579,
"max": 0.1905353519846411,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5583990966572481,
"min": 0.39625715005266315,
"max": 0.9290165623321253,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.3470097306000012e-05,
"min": 1.3470097306000012e-05,
"max": 0.00048647000270600005,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.388038922400005e-05,
"min": 5.388038922400005e-05,
"max": 0.0023086000382800003,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269399999999998,
"min": 0.10269399999999998,
"max": 0.197294,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4107759999999999,
"min": 0.4107759999999999,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.004864970599999999,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.818181818181817,
"min": 4.113636363636363,
"max": 26.818181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1180.0,
"min": 181.0,
"max": 1470.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.818181818181817,
"min": 4.113636363636363,
"max": 26.818181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1180.0,
"min": 181.0,
"max": 1470.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683619703",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683620236"
},
"total": 532.26338107,
"count": 1,
"self": 0.4348443700000644,
"children": {
"run_training.setup": {
"total": 0.040970850000007886,
"count": 1,
"self": 0.040970850000007886
},
"TrainerController.start_learning": {
"total": 531.78756585,
"count": 1,
"self": 0.5534115969778668,
"children": {
"TrainerController._reset_env": {
"total": 3.979671144000008,
"count": 1,
"self": 3.979671144000008
},
"TrainerController.advance": {
"total": 527.1221368160224,
"count": 18201,
"self": 0.28456573803237006,
"children": {
"env_step": {
"total": 526.83757107799,
"count": 18201,
"self": 401.1176782240002,
"children": {
"SubprocessEnvManager._take_step": {
"total": 125.44424463199789,
"count": 18201,
"self": 1.7394290390035394,
"children": {
"TorchPolicy.evaluate": {
"total": 123.70481559299435,
"count": 18201,
"self": 123.70481559299435
}
}
},
"workers": {
"total": 0.27564822199190075,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 530.278142795013,
"count": 18201,
"is_parallel": true,
"self": 279.7022593470268,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004954949000079978,
"count": 1,
"is_parallel": true,
"self": 0.0032058990001360144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017490499999439635,
"count": 10,
"is_parallel": true,
"self": 0.0017490499999439635
}
}
},
"UnityEnvironment.step": {
"total": 0.03441349500008073,
"count": 1,
"is_parallel": true,
"self": 0.0005659900001546703,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032053999996151106,
"count": 1,
"is_parallel": true,
"self": 0.00032053999996151106
},
"communicator.exchange": {
"total": 0.03168350199996439,
"count": 1,
"is_parallel": true,
"self": 0.03168350199996439
},
"steps_from_proto": {
"total": 0.0018434630000001562,
"count": 1,
"is_parallel": true,
"self": 0.00035063500001797365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014928279999821825,
"count": 10,
"is_parallel": true,
"self": 0.0014928279999821825
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 250.57588344798614,
"count": 18200,
"is_parallel": true,
"self": 9.838157644995022,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.28065376399627,
"count": 18200,
"is_parallel": true,
"self": 5.28065376399627
},
"communicator.exchange": {
"total": 201.93809926898348,
"count": 18200,
"is_parallel": true,
"self": 201.93809926898348
},
"steps_from_proto": {
"total": 33.51897277001137,
"count": 18200,
"is_parallel": true,
"self": 6.502538637997759,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.016434132013615,
"count": 182000,
"is_parallel": true,
"self": 27.016434132013615
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001190659997973853,
"count": 1,
"self": 0.0001190659997973853,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 523.6924109909505,
"count": 418951,
"is_parallel": true,
"self": 9.708386521938678,
"children": {
"process_trajectory": {
"total": 254.0783212280112,
"count": 418951,
"is_parallel": true,
"self": 253.0390157430112,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0393054849999999,
"count": 4,
"is_parallel": true,
"self": 1.0393054849999999
}
}
},
"_update_policy": {
"total": 259.90570324100065,
"count": 90,
"is_parallel": true,
"self": 106.20304863999922,
"children": {
"TorchPPOOptimizer.update": {
"total": 153.70265460100143,
"count": 7645,
"is_parallel": true,
"self": 153.70265460100143
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13222722699993028,
"count": 1,
"self": 0.0008800170000995422,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13134720999983074,
"count": 1,
"self": 0.13134720999983074
}
}
}
}
}
}
}