{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.3009757995605469,
"min": 1.3009757995605469,
"max": 2.8832619190216064,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 12450.3388671875,
"min": 12450.3388671875,
"max": 29590.916015625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 10.78321361541748,
"min": 0.1922581046819687,
"max": 10.78321361541748,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2102.7265625,
"min": 37.298072814941406,
"max": 2138.6083984375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.272727272727273,
"min": 3.0,
"max": 23.272727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1024.0,
"min": 132.0,
"max": 1219.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.272727272727273,
"min": 3.0,
"max": 23.272727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1024.0,
"min": 132.0,
"max": 1219.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04854722280173983,
"min": 0.040722295108614213,
"max": 0.054452556641479445,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.09709444560347966,
"min": 0.08144459021722843,
"max": 0.16335766992443834,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2527855062017254,
"min": 0.10304775325508386,
"max": 0.3152477364329731,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5055710124034508,
"min": 0.20609550651016773,
"max": 0.8257487837006063,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.432097856000005e-06,
"min": 6.432097856000005e-06,
"max": 0.00029023200325600004,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.286419571200001e-05,
"min": 1.286419571200001e-05,
"max": 0.0007422840525719999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102144,
"min": 0.102144,
"max": 0.196744,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.204288,
"min": 0.204288,
"max": 0.5474280000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00011698560000000007,
"min": 0.00011698560000000007,
"max": 0.0048375256,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00023397120000000015,
"min": 0.00023397120000000015,
"max": 0.012376657199999998,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699625658",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699626121"
},
"total": 462.94353993100003,
"count": 1,
"self": 0.6918987740000375,
"children": {
"run_training.setup": {
"total": 0.07031105900000512,
"count": 1,
"self": 0.07031105900000512
},
"TrainerController.start_learning": {
"total": 462.181330098,
"count": 1,
"self": 0.5387673159966653,
"children": {
"TrainerController._reset_env": {
"total": 11.40669449500001,
"count": 1,
"self": 11.40669449500001
},
"TrainerController.advance": {
"total": 450.0789563270032,
"count": 18203,
"self": 0.247867750975729,
"children": {
"env_step": {
"total": 449.83108857602747,
"count": 18203,
"self": 309.5904530520179,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.97096221898823,
"count": 18203,
"self": 1.4068417709968344,
"children": {
"TorchPolicy.evaluate": {
"total": 138.5641204479914,
"count": 18203,
"self": 138.5641204479914
}
}
},
"workers": {
"total": 0.26967330502134246,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 461.0132925789869,
"count": 18203,
"is_parallel": true,
"self": 223.4307308279865,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007152268000027107,
"count": 1,
"is_parallel": true,
"self": 0.004202289999966524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0029499780000605824,
"count": 10,
"is_parallel": true,
"self": 0.0029499780000605824
}
}
},
"UnityEnvironment.step": {
"total": 0.06792309699994803,
"count": 1,
"is_parallel": true,
"self": 0.0006344939999962662,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004126659998746618,
"count": 1,
"is_parallel": true,
"self": 0.0004126659998746618
},
"communicator.exchange": {
"total": 0.0650125909999133,
"count": 1,
"is_parallel": true,
"self": 0.0650125909999133
},
"steps_from_proto": {
"total": 0.0018633460001638014,
"count": 1,
"is_parallel": true,
"self": 0.0003962299999784591,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014671160001853423,
"count": 10,
"is_parallel": true,
"self": 0.0014671160001853423
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 237.5825617510004,
"count": 18202,
"is_parallel": true,
"self": 10.608044900001005,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.377869183996154,
"count": 18202,
"is_parallel": true,
"self": 5.377869183996154
},
"communicator.exchange": {
"total": 187.66467389399918,
"count": 18202,
"is_parallel": true,
"self": 187.66467389399918
},
"steps_from_proto": {
"total": 33.931973773004074,
"count": 18202,
"is_parallel": true,
"self": 6.300384587997996,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.631589185006078,
"count": 182020,
"is_parallel": true,
"self": 27.631589185006078
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014500400016004278,
"count": 1,
"self": 0.00014500400016004278,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 445.0605960031178,
"count": 590460,
"is_parallel": true,
"self": 12.854787390098636,
"children": {
"process_trajectory": {
"total": 313.0158412390192,
"count": 590460,
"is_parallel": true,
"self": 311.9401254470192,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0757157919999827,
"count": 4,
"is_parallel": true,
"self": 1.0757157919999827
}
}
},
"_update_policy": {
"total": 119.18996737399993,
"count": 45,
"is_parallel": true,
"self": 52.40173951000088,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.78822786399905,
"count": 2289,
"is_parallel": true,
"self": 66.78822786399905
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15676695599995583,
"count": 1,
"self": 0.0014289650000591791,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15533799099989665,
"count": 1,
"self": 0.15533799099989665
}
}
}
}
}
}
}