{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9376963376998901,
"min": 0.9300739169120789,
"max": 2.8731753826141357,
"count": 25
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8922.1806640625,
"min": 8922.1806640625,
"max": 29424.189453125,
"count": 25
},
"SnowballTarget.Step.mean": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Step.sum": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.089237213134766,
"min": 0.3212386965751648,
"max": 13.089237213134766,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2539.31201171875,
"min": 62.32030487060547,
"max": 2665.495849609375,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06728952192290807,
"min": 0.06308894150550295,
"max": 0.07964882908495116,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2691580876916323,
"min": 0.25287049256068855,
"max": 0.3716081314327597,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17846819626934388,
"min": 0.11971202451397903,
"max": 0.28324033159251305,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7138727850773755,
"min": 0.4788480980559161,
"max": 1.349308637135169,
"count": 25
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.7456980848e-06,
"min": 5.7456980848e-06,
"max": 0.0002935056021648,
"count": 25
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.29827923392e-05,
"min": 2.29827923392e-05,
"max": 0.0014081280306239997,
"count": 25
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10191520000000001,
"min": 0.10191520000000001,
"max": 0.19783520000000002,
"count": 25
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40766080000000005,
"min": 0.40766080000000005,
"max": 0.9693760000000002,
"count": 25
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010556848000000002,
"min": 0.00010556848000000002,
"max": 0.004891976480000001,
"count": 25
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004222739200000001,
"min": 0.0004222739200000001,
"max": 0.023471862399999998,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.818181818181817,
"min": 3.1818181818181817,
"max": 25.818181818181817,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1136.0,
"min": 140.0,
"max": 1408.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.818181818181817,
"min": 3.1818181818181817,
"max": 25.818181818181817,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1136.0,
"min": 140.0,
"max": 1408.0,
"count": 25
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673645781",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673646306"
},
"total": 524.877929789,
"count": 1,
"self": 0.38300075899996955,
"children": {
"run_training.setup": {
"total": 0.11092244600001777,
"count": 1,
"self": 0.11092244600001777
},
"TrainerController.start_learning": {
"total": 524.384006584,
"count": 1,
"self": 0.6116441220037814,
"children": {
"TrainerController._reset_env": {
"total": 9.338942393000025,
"count": 1,
"self": 9.338942393000025
},
"TrainerController.advance": {
"total": 514.3093548839963,
"count": 22738,
"self": 0.3248505910057702,
"children": {
"env_step": {
"total": 513.9845042929906,
"count": 22738,
"self": 332.57440829100454,
"children": {
"SubprocessEnvManager._take_step": {
"total": 181.07974173599865,
"count": 22738,
"self": 1.6781393499991282,
"children": {
"TorchPolicy.evaluate": {
"total": 179.40160238599952,
"count": 22738,
"self": 39.5891862970019,
"children": {
"TorchPolicy.sample_actions": {
"total": 139.81241608899762,
"count": 22738,
"self": 139.81241608899762
}
}
}
}
},
"workers": {
"total": 0.33035426598735285,
"count": 22738,
"self": 0.0,
"children": {
"worker_root": {
"total": 522.8971083430015,
"count": 22738,
"is_parallel": true,
"self": 255.4628326150003,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0061574839999707365,
"count": 1,
"is_parallel": true,
"self": 0.003497293999885187,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026601900000855494,
"count": 10,
"is_parallel": true,
"self": 0.0026601900000855494
}
}
},
"UnityEnvironment.step": {
"total": 0.03657978699999376,
"count": 1,
"is_parallel": true,
"self": 0.0005325409999841213,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002740220000418958,
"count": 1,
"is_parallel": true,
"self": 0.0002740220000418958
},
"communicator.exchange": {
"total": 0.033981750999998894,
"count": 1,
"is_parallel": true,
"self": 0.033981750999998894
},
"steps_from_proto": {
"total": 0.0017914729999688461,
"count": 1,
"is_parallel": true,
"self": 0.00040951399995492466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013819590000139215,
"count": 10,
"is_parallel": true,
"self": 0.0013819590000139215
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 267.4342757280012,
"count": 22737,
"is_parallel": true,
"self": 10.263900924998893,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.157725337998841,
"count": 22737,
"is_parallel": true,
"self": 6.157725337998841
},
"communicator.exchange": {
"total": 213.91262657700076,
"count": 22737,
"is_parallel": true,
"self": 213.91262657700076
},
"steps_from_proto": {
"total": 37.100022888002684,
"count": 22737,
"is_parallel": true,
"self": 7.944366144985338,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.155656743017346,
"count": 227370,
"is_parallel": true,
"self": 29.155656743017346
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.1212999917661364e-05,
"count": 1,
"self": 4.1212999917661364e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 510.77070809897015,
"count": 422605,
"is_parallel": true,
"self": 10.931379730002504,
"children": {
"process_trajectory": {
"total": 289.44334976596724,
"count": 422605,
"is_parallel": true,
"self": 288.5160470159672,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9273027500000239,
"count": 5,
"is_parallel": true,
"self": 0.9273027500000239
}
}
},
"_update_policy": {
"total": 210.3959786030004,
"count": 113,
"is_parallel": true,
"self": 54.87017794499593,
"children": {
"TorchPPOOptimizer.update": {
"total": 155.52580065800447,
"count": 5760,
"is_parallel": true,
"self": 155.52580065800447
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1240239719999181,
"count": 1,
"self": 0.0008899249999103631,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12313404700000774,
"count": 1,
"self": 0.12313404700000774
}
}
}
}
}
}
}