{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9186777472496033,
"min": 0.9121712446212769,
"max": 2.864837169647217,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8761.4296875,
"min": 8761.4296875,
"max": 29338.796875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.917972564697266,
"min": 0.32342132925987244,
"max": 12.923827171325684,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2519.004638671875,
"min": 62.743736267089844,
"max": 2636.460693359375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0631172218493114,
"min": 0.05969783245527651,
"max": 0.0749817720650896,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2524688873972456,
"min": 0.24650352032496775,
"max": 0.374908860325448,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21504478463355234,
"min": 0.10118512385501983,
"max": 0.27003507304425334,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8601791385342094,
"min": 0.40474049542007934,
"max": 1.3501753652212667,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.295454545454547,
"min": 3.0,
"max": 25.327272727272728,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1113.0,
"min": 132.0,
"max": 1393.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.295454545454547,
"min": 3.0,
"max": 25.327272727272728,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1113.0,
"min": 132.0,
"max": 1393.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676246122",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676246695"
},
"total": 572.99773787,
"count": 1,
"self": 0.6473980199999687,
"children": {
"run_training.setup": {
"total": 0.12728618599999209,
"count": 1,
"self": 0.12728618599999209
},
"TrainerController.start_learning": {
"total": 572.2230536640001,
"count": 1,
"self": 0.784830557998589,
"children": {
"TrainerController._reset_env": {
"total": 6.493766937999965,
"count": 1,
"self": 6.493766937999965
},
"TrainerController.advance": {
"total": 564.8047079930016,
"count": 18201,
"self": 0.3999151519979023,
"children": {
"env_step": {
"total": 564.4047928410037,
"count": 18201,
"self": 440.4401193100038,
"children": {
"SubprocessEnvManager._take_step": {
"total": 123.57503368399495,
"count": 18201,
"self": 2.147556427988036,
"children": {
"TorchPolicy.evaluate": {
"total": 121.42747725600691,
"count": 18201,
"self": 19.438991813008442,
"children": {
"TorchPolicy.sample_actions": {
"total": 101.98848544299847,
"count": 18201,
"self": 101.98848544299847
}
}
}
}
},
"workers": {
"total": 0.38963984700490073,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 570.2879792139963,
"count": 18201,
"is_parallel": true,
"self": 252.43798386500083,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.012106453000001238,
"count": 1,
"is_parallel": true,
"self": 0.004216138000003866,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007890314999997372,
"count": 10,
"is_parallel": true,
"self": 0.007890314999997372
}
}
},
"UnityEnvironment.step": {
"total": 0.0522557900000038,
"count": 1,
"is_parallel": true,
"self": 0.0006860380000262012,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004003070000067055,
"count": 1,
"is_parallel": true,
"self": 0.0004003070000067055
},
"communicator.exchange": {
"total": 0.04889168099998642,
"count": 1,
"is_parallel": true,
"self": 0.04889168099998642
},
"steps_from_proto": {
"total": 0.0022777639999844723,
"count": 1,
"is_parallel": true,
"self": 0.0004976170000077218,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017801469999767505,
"count": 10,
"is_parallel": true,
"self": 0.0017801469999767505
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 317.8499953489955,
"count": 18200,
"is_parallel": true,
"self": 13.409939840991115,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.158058926003889,
"count": 18200,
"is_parallel": true,
"self": 7.158058926003889
},
"communicator.exchange": {
"total": 253.6331783730049,
"count": 18200,
"is_parallel": true,
"self": 253.6331783730049
},
"steps_from_proto": {
"total": 43.64881820899558,
"count": 18200,
"is_parallel": true,
"self": 9.883749361952027,
"children": {
"_process_rank_one_or_two_observation": {
"total": 33.76506884704355,
"count": 182000,
"is_parallel": true,
"self": 33.76506884704355
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001628180000352586,
"count": 1,
"self": 0.0001628180000352586,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 560.025692885009,
"count": 551921,
"is_parallel": true,
"self": 14.107347296025864,
"children": {
"process_trajectory": {
"total": 309.67673241198383,
"count": 551921,
"is_parallel": true,
"self": 308.422957923984,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2537744879998627,
"count": 4,
"is_parallel": true,
"self": 1.2537744879998627
}
}
},
"_update_policy": {
"total": 236.24161317699935,
"count": 90,
"is_parallel": true,
"self": 76.30433354700631,
"children": {
"TorchPPOOptimizer.update": {
"total": 159.93727962999304,
"count": 4587,
"is_parallel": true,
"self": 159.93727962999304
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1395853569999872,
"count": 1,
"self": 0.0010833049998382194,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13850205200014898,
"count": 1,
"self": 0.13850205200014898
}
}
}
}
}
}
}