{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.728786826133728,
"min": 0.6998227834701538,
"max": 2.841430187225342,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7054.65673828125,
"min": 6666.51171875,
"max": 29130.341796875,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.875195503234863,
"min": 0.23312950134277344,
"max": 13.875195503234863,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2844.4150390625,
"min": 45.22712326049805,
"max": 2844.4150390625,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07006764166640897,
"min": 0.0604873503338347,
"max": 0.0769931508193943,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3503382083320449,
"min": 0.2419494013353388,
"max": 0.3849657540969715,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17441227643513213,
"min": 0.12089742579401527,
"max": 0.2767267723878225,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8720613821756606,
"min": 0.4835897031760611,
"max": 1.3836338619391124,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.545454545454547,
"min": 3.1818181818181817,
"max": 27.477272727272727,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1460.0,
"min": 140.0,
"max": 1489.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.545454545454547,
"min": 3.1818181818181817,
"max": 27.477272727272727,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1460.0,
"min": 140.0,
"max": 1489.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680085567",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680086747"
},
"total": 1180.081384634,
"count": 1,
"self": 0.43754989700005353,
"children": {
"run_training.setup": {
"total": 0.10753246499996294,
"count": 1,
"self": 0.10753246499996294
},
"TrainerController.start_learning": {
"total": 1179.5363022719998,
"count": 1,
"self": 1.597838579019708,
"children": {
"TrainerController._reset_env": {
"total": 10.174212183000009,
"count": 1,
"self": 10.174212183000009
},
"TrainerController.advance": {
"total": 1167.63324830298,
"count": 45477,
"self": 0.814166800946623,
"children": {
"env_step": {
"total": 1166.8190815020334,
"count": 45477,
"self": 845.4297504950063,
"children": {
"SubprocessEnvManager._take_step": {
"total": 320.6337505110463,
"count": 45477,
"self": 6.0276977410430845,
"children": {
"TorchPolicy.evaluate": {
"total": 314.6060527700032,
"count": 45477,
"self": 314.6060527700032
}
}
},
"workers": {
"total": 0.7555804959808938,
"count": 45477,
"self": 0.0,
"children": {
"worker_root": {
"total": 1175.4574385680069,
"count": 45477,
"is_parallel": true,
"self": 541.831436301016,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005623278000030041,
"count": 1,
"is_parallel": true,
"self": 0.004122536000068067,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015007419999619742,
"count": 10,
"is_parallel": true,
"self": 0.0015007419999619742
}
}
},
"UnityEnvironment.step": {
"total": 0.04641670499995598,
"count": 1,
"is_parallel": true,
"self": 0.0005604069999662897,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00028357299993331253,
"count": 1,
"is_parallel": true,
"self": 0.00028357299993331253
},
"communicator.exchange": {
"total": 0.04377142299995285,
"count": 1,
"is_parallel": true,
"self": 0.04377142299995285
},
"steps_from_proto": {
"total": 0.001801302000103533,
"count": 1,
"is_parallel": true,
"self": 0.0003723500003616209,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001428951999741912,
"count": 10,
"is_parallel": true,
"self": 0.001428951999741912
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 633.6260022669909,
"count": 45476,
"is_parallel": true,
"self": 24.73291101306438,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.992862852971257,
"count": 45476,
"is_parallel": true,
"self": 12.992862852971257
},
"communicator.exchange": {
"total": 516.2120104569789,
"count": 45476,
"is_parallel": true,
"self": 516.2120104569789
},
"steps_from_proto": {
"total": 79.68821794397638,
"count": 45476,
"is_parallel": true,
"self": 16.22073598097927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.46748196299711,
"count": 454760,
"is_parallel": true,
"self": 63.46748196299711
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.430800006753998e-05,
"count": 1,
"self": 3.430800006753998e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1159.390414065035,
"count": 1058516,
"is_parallel": true,
"self": 24.832030384970494,
"children": {
"process_trajectory": {
"total": 647.3593765840659,
"count": 1058516,
"is_parallel": true,
"self": 644.9969564350658,
"children": {
"RLTrainer._checkpoint": {
"total": 2.3624201490000587,
"count": 10,
"is_parallel": true,
"self": 2.3624201490000587
}
}
},
"_update_policy": {
"total": 487.1990070959986,
"count": 227,
"is_parallel": true,
"self": 174.22708252500024,
"children": {
"TorchPPOOptimizer.update": {
"total": 312.97192457099834,
"count": 11574,
"is_parallel": true,
"self": 312.97192457099834
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13096889899998132,
"count": 1,
"self": 0.0009496620000390976,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13001923699994222,
"count": 1,
"self": 0.13001923699994222
}
}
}
}
}
}
}