{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.6788556575775146,
"min": 2.6775705814361572,
"max": 2.890324354171753,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 3978.1005859375,
"min": 1855.556396484375,
"max": 4185.9599609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 19992.0,
"min": 960.0,
"max": 19992.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 19992.0,
"min": 960.0,
"max": 19992.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.1473498344421387,
"min": -0.07521140575408936,
"max": 2.1473498344421387,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 55.83109664916992,
"min": -1.2033824920654297,
"max": 55.83109664916992,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 9
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 2189.0,
"min": 2189.0,
"max": 2189.0,
"count": 9
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06899261169646885,
"min": 0.06055742186293298,
"max": 0.07794120928213256,
"count": 9
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.06899261169646885,
"min": 0.06055742186293298,
"max": 0.07794120928213256,
"count": 9
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2524205738422917,
"min": 0.09193896502256393,
"max": 0.2813739899326773,
"count": 9
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.2524205738422917,
"min": 0.09193896502256393,
"max": 0.2813739899326773,
"count": 9
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.4400098559999969e-05,
"min": 1.4400098559999969e-05,
"max": 0.00089440001056,
"count": 9
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.4400098559999969e-05,
"min": 1.4400098559999969e-05,
"max": 0.00089440001056,
"count": 9
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10144,
"min": 0.10144,
"max": 0.18944000000000003,
"count": 9
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10144,
"min": 0.10144,
"max": 0.18944000000000003,
"count": 9
},
"SnowballTarget.Policy.Beta.mean": {
"value": 8.185599999999986e-05,
"min": 8.185599999999986e-05,
"max": 0.0044730559999999996,
"count": 9
},
"SnowballTarget.Policy.Beta.sum": {
"value": 8.185599999999986e-05,
"min": 8.185599999999986e-05,
"max": 0.0044730559999999996,
"count": 9
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 8.181818181818182,
"min": 2.272727272727273,
"max": 8.181818181818182,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 90.0,
"min": 5.0,
"max": 90.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 8.181818181818182,
"min": 2.272727272727273,
"max": 8.181818181818182,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 90.0,
"min": 5.0,
"max": 90.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715891532",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715891583"
},
"total": 50.979097178000075,
"count": 1,
"self": 0.4478481560000773,
"children": {
"run_training.setup": {
"total": 0.05625610599997799,
"count": 1,
"self": 0.05625610599997799
},
"TrainerController.start_learning": {
"total": 50.47499291600002,
"count": 1,
"self": 0.08132929699888791,
"children": {
"TrainerController._reset_env": {
"total": 3.4105121349999763,
"count": 1,
"self": 3.4105121349999763
},
"TrainerController.advance": {
"total": 46.89521004800122,
"count": 1871,
"self": 0.025772283999344836,
"children": {
"env_step": {
"total": 46.86943776400187,
"count": 1871,
"self": 29.633547662001774,
"children": {
"SubprocessEnvManager._take_step": {
"total": 17.20989406299998,
"count": 1871,
"self": 0.15427301700117368,
"children": {
"TorchPolicy.evaluate": {
"total": 17.055621045998805,
"count": 1871,
"self": 17.055621045998805
}
}
},
"workers": {
"total": 0.02599603900011971,
"count": 1871,
"self": 0.0,
"children": {
"worker_root": {
"total": 50.25248729600116,
"count": 1871,
"is_parallel": true,
"self": 27.664702424007373,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005320769000036307,
"count": 1,
"is_parallel": true,
"self": 0.003860969000015757,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014598000000205502,
"count": 10,
"is_parallel": true,
"self": 0.0014598000000205502
}
}
},
"UnityEnvironment.step": {
"total": 0.03632874499999161,
"count": 1,
"is_parallel": true,
"self": 0.0006854340000472803,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039413399997556553,
"count": 1,
"is_parallel": true,
"self": 0.00039413399997556553
},
"communicator.exchange": {
"total": 0.033119478000003255,
"count": 1,
"is_parallel": true,
"self": 0.033119478000003255
},
"steps_from_proto": {
"total": 0.0021296989999655125,
"count": 1,
"is_parallel": true,
"self": 0.0006112800001574215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001518418999808091,
"count": 10,
"is_parallel": true,
"self": 0.001518418999808091
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 22.58778487199379,
"count": 1870,
"is_parallel": true,
"self": 1.019031279996625,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.5401405770016936,
"count": 1870,
"is_parallel": true,
"self": 0.5401405770016936
},
"communicator.exchange": {
"total": 17.625984907997008,
"count": 1870,
"is_parallel": true,
"self": 17.625984907997008
},
"steps_from_proto": {
"total": 3.4026281069984634,
"count": 1870,
"is_parallel": true,
"self": 0.6428753099964979,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.7597527970019655,
"count": 18700,
"is_parallel": true,
"self": 2.7597527970019655
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002529659999481737,
"count": 1,
"self": 0.0002529659999481737,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 46.36920315899556,
"count": 68722,
"is_parallel": true,
"self": 1.3887293310086761,
"children": {
"process_trajectory": {
"total": 25.92100388598692,
"count": 68722,
"is_parallel": true,
"self": 25.052068088986744,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8689357970001765,
"count": 4,
"is_parallel": true,
"self": 0.8689357970001765
}
}
},
"_update_policy": {
"total": 19.059469941999964,
"count": 9,
"is_parallel": true,
"self": 5.820686683999497,
"children": {
"TorchPPOOptimizer.update": {
"total": 13.238783258000467,
"count": 456,
"is_parallel": true,
"self": 13.238783258000467
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08768846999998914,
"count": 1,
"self": 0.0009447510000200054,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08674371899996913,
"count": 1,
"self": 0.08674371899996913
}
}
}
}
}
}
}