{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.4841200113296509,
"min": 0.4665323495864868,
"max": 0.5882323384284973,
"count": 15
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4984.49951171875,
"min": 4500.84814453125,
"max": 6056.4404296875,
"count": 15
},
"SnowballTarget.Step.mean": {
"value": 399992.0,
"min": 259960.0,
"max": 399992.0,
"count": 15
},
"SnowballTarget.Step.sum": {
"value": 399992.0,
"min": 259960.0,
"max": 399992.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.538875579833984,
"min": 8.303239822387695,
"max": 13.613066673278809,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2775.469482421875,
"min": 1610.82861328125,
"max": 2779.3369140625,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07355025226219473,
"min": 0.06287583968780167,
"max": 0.07429995013517328,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3677512613109737,
"min": 0.27148731638119306,
"max": 0.3714997506758664,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20143492419053524,
"min": 0.15505613469902207,
"max": 0.25989641009501235,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0071746209526762,
"min": 0.7169683742172577,
"max": 1.0883234765599756,
"count": 15
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.6600987800000027e-06,
"min": 3.6600987800000027e-06,
"max": 0.00010843506385499998,
"count": 15
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.8300493900000014e-05,
"min": 1.8300493900000014e-05,
"max": 0.0005050503316499999,
"count": 15
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10122,
"min": 0.10122,
"max": 0.13614500000000002,
"count": 15
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5061,
"min": 0.41478000000000004,
"max": 0.66835,
"count": 15
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.087800000000006e-05,
"min": 7.087800000000006e-05,
"max": 0.0018136354999999998,
"count": 15
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003543900000000003,
"min": 0.0003543900000000003,
"max": 0.008450665,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.563636363636363,
"min": 25.945454545454545,
"max": 26.75,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1461.0,
"min": 1148.0,
"max": 1461.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.563636363636363,
"min": 25.945454545454545,
"max": 26.75,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1461.0,
"min": 1148.0,
"max": 1461.0,
"count": 15
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702567993",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702568355"
},
"total": 361.95501640600014,
"count": 1,
"self": 0.4343407610001577,
"children": {
"run_training.setup": {
"total": 0.057348571000147786,
"count": 1,
"self": 0.057348571000147786
},
"TrainerController.start_learning": {
"total": 361.46332707399984,
"count": 1,
"self": 0.4497566689951782,
"children": {
"TrainerController._reset_env": {
"total": 2.7887829889998557,
"count": 1,
"self": 2.7887829889998557
},
"TrainerController.advance": {
"total": 358.136315965005,
"count": 13672,
"self": 0.20839175400919885,
"children": {
"env_step": {
"total": 357.9279242109958,
"count": 13672,
"self": 242.0071293629992,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.70766400899174,
"count": 13672,
"self": 1.1013612359915896,
"children": {
"TorchPolicy.evaluate": {
"total": 114.60630277300015,
"count": 13672,
"self": 114.60630277300015
}
}
},
"workers": {
"total": 0.21313083900486163,
"count": 13672,
"self": 0.0,
"children": {
"worker_root": {
"total": 360.5282995679929,
"count": 13672,
"is_parallel": true,
"self": 190.16598111399776,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022635519999312237,
"count": 1,
"is_parallel": true,
"self": 0.0007432500003687892,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015203019995624345,
"count": 10,
"is_parallel": true,
"self": 0.0015203019995624345
}
}
},
"UnityEnvironment.step": {
"total": 0.03687465300004078,
"count": 1,
"is_parallel": true,
"self": 0.0006080230000407028,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003834459998870443,
"count": 1,
"is_parallel": true,
"self": 0.0003834459998870443
},
"communicator.exchange": {
"total": 0.033839187000012316,
"count": 1,
"is_parallel": true,
"self": 0.033839187000012316
},
"steps_from_proto": {
"total": 0.002043997000100717,
"count": 1,
"is_parallel": true,
"self": 0.0004074850003235042,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016365119997772126,
"count": 10,
"is_parallel": true,
"self": 0.0016365119997772126
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 170.36231845399516,
"count": 13671,
"is_parallel": true,
"self": 7.935667467988196,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.100336375004645,
"count": 13671,
"is_parallel": true,
"self": 4.100336375004645
},
"communicator.exchange": {
"total": 132.48005880298297,
"count": 13671,
"is_parallel": true,
"self": 132.48005880298297
},
"steps_from_proto": {
"total": 25.84625580801935,
"count": 13671,
"is_parallel": true,
"self": 4.88267169401729,
"children": {
"_process_rank_one_or_two_observation": {
"total": 20.96358411400206,
"count": 136710,
"is_parallel": true,
"self": 20.96358411400206
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00023051399966789177,
"count": 1,
"self": 0.00023051399966789177,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 353.9001856590198,
"count": 507449,
"is_parallel": true,
"self": 10.7689251541683,
"children": {
"process_trajectory": {
"total": 183.55084621785159,
"count": 507449,
"is_parallel": true,
"self": 183.09802962485196,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45281659299962485,
"count": 3,
"is_parallel": true,
"self": 0.45281659299962485
}
}
},
"_update_policy": {
"total": 159.58041428699994,
"count": 68,
"is_parallel": true,
"self": 53.81675443401832,
"children": {
"TorchPPOOptimizer.update": {
"total": 105.76365985298162,
"count": 4620,
"is_parallel": true,
"self": 105.76365985298162
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08824093700013691,
"count": 1,
"self": 0.0012164260001554794,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08702451099998143,
"count": 1,
"self": 0.08702451099998143
}
}
}
}
}
}
}