{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9594436287879944,
"min": 0.9562177658081055,
"max": 2.8531737327575684,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9150.2138671875,
"min": 9150.2138671875,
"max": 29219.3515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.818648338317871,
"min": 0.372816264629364,
"max": 12.818648338317871,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2499.636474609375,
"min": 72.32635498046875,
"max": 2608.49072265625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07143322242236715,
"min": 0.06259749046219833,
"max": 0.07293287717609429,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2857328896894686,
"min": 0.2503899618487933,
"max": 0.36466438588047145,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19603569374657143,
"min": 0.12671897489139264,
"max": 0.28324446847625806,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7841427749862857,
"min": 0.5068758995655706,
"max": 1.4162223423812903,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.34090909090909,
"min": 3.5,
"max": 25.4,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1115.0,
"min": 154.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.34090909090909,
"min": 3.5,
"max": 25.4,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1115.0,
"min": 154.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677799704",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677800141"
},
"total": 437.4125108610001,
"count": 1,
"self": 0.38915428700011034,
"children": {
"run_training.setup": {
"total": 0.10634069900004306,
"count": 1,
"self": 0.10634069900004306
},
"TrainerController.start_learning": {
"total": 436.91701587499995,
"count": 1,
"self": 0.4602448919940798,
"children": {
"TrainerController._reset_env": {
"total": 9.690764410999918,
"count": 1,
"self": 9.690764410999918
},
"TrainerController.advance": {
"total": 426.6513068670059,
"count": 18200,
"self": 0.24394506198791532,
"children": {
"env_step": {
"total": 426.407361805018,
"count": 18200,
"self": 290.8676928240384,
"children": {
"SubprocessEnvManager._take_step": {
"total": 135.28903868097382,
"count": 18200,
"self": 1.4480200859600245,
"children": {
"TorchPolicy.evaluate": {
"total": 133.8410185950138,
"count": 18200,
"self": 29.92999258301245,
"children": {
"TorchPolicy.sample_actions": {
"total": 103.91102601200134,
"count": 18200,
"self": 103.91102601200134
}
}
}
}
},
"workers": {
"total": 0.25063030000580966,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 435.5089045399991,
"count": 18200,
"is_parallel": true,
"self": 211.72931355698802,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005494048000059593,
"count": 1,
"is_parallel": true,
"self": 0.004125366999915059,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013686810001445338,
"count": 10,
"is_parallel": true,
"self": 0.0013686810001445338
}
}
},
"UnityEnvironment.step": {
"total": 0.05966171700003997,
"count": 1,
"is_parallel": true,
"self": 0.001510558000063611,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038364300007742713,
"count": 1,
"is_parallel": true,
"self": 0.00038364300007742713
},
"communicator.exchange": {
"total": 0.05512859199996001,
"count": 1,
"is_parallel": true,
"self": 0.05512859199996001
},
"steps_from_proto": {
"total": 0.0026389239999389247,
"count": 1,
"is_parallel": true,
"self": 0.0004312199999958466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002207703999943078,
"count": 10,
"is_parallel": true,
"self": 0.002207703999943078
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 223.77959098301108,
"count": 18199,
"is_parallel": true,
"self": 9.0973562320371,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.074898057993664,
"count": 18199,
"is_parallel": true,
"self": 5.074898057993664
},
"communicator.exchange": {
"total": 180.1649235109868,
"count": 18199,
"is_parallel": true,
"self": 180.1649235109868
},
"steps_from_proto": {
"total": 29.442413181993516,
"count": 18199,
"is_parallel": true,
"self": 6.352609465028763,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.089803716964752,
"count": 181990,
"is_parallel": true,
"self": 23.089803716964752
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.632100015238393e-05,
"count": 1,
"self": 3.632100015238393e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 423.4694437350264,
"count": 405970,
"is_parallel": true,
"self": 9.29637680004862,
"children": {
"process_trajectory": {
"total": 245.50884282797892,
"count": 405970,
"is_parallel": true,
"self": 244.85566764297937,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6531751849995544,
"count": 4,
"is_parallel": true,
"self": 0.6531751849995544
}
}
},
"_update_policy": {
"total": 168.66422410699886,
"count": 90,
"is_parallel": true,
"self": 59.01190349800629,
"children": {
"TorchPPOOptimizer.update": {
"total": 109.65232060899257,
"count": 4587,
"is_parallel": true,
"self": 109.65232060899257
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11466338399986853,
"count": 1,
"self": 0.0008232569998654071,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11384012700000312,
"count": 1,
"self": 0.11384012700000312
}
}
}
}
}
}
}