{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8962484002113342,
"min": 0.8962484002113342,
"max": 2.87442946434021,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8557.3798828125,
"min": 8557.3798828125,
"max": 29563.505859375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.00953197479248,
"min": 0.49117663502693176,
"max": 13.102996826171875,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2536.858642578125,
"min": 95.28826904296875,
"max": 2651.996826171875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06683110973615697,
"min": 0.06409403451421074,
"max": 0.08010147921056213,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2673244389446279,
"min": 0.25637613805684295,
"max": 0.4005073960528106,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1976547395335693,
"min": 0.10510676338761936,
"max": 0.28057960774384294,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7906189581342772,
"min": 0.42042705355047744,
"max": 1.4028980387192147,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.318181818181817,
"min": 3.340909090909091,
"max": 26.181818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1114.0,
"min": 147.0,
"max": 1440.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.318181818181817,
"min": 3.340909090909091,
"max": 26.181818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1114.0,
"min": 147.0,
"max": 1440.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680035460",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680035943"
},
"total": 483.00937043199997,
"count": 1,
"self": 0.39399331299989626,
"children": {
"run_training.setup": {
"total": 0.20874383200001034,
"count": 1,
"self": 0.20874383200001034
},
"TrainerController.start_learning": {
"total": 482.40663328700003,
"count": 1,
"self": 0.5594470440001373,
"children": {
"TrainerController._reset_env": {
"total": 9.28389269600001,
"count": 1,
"self": 9.28389269600001
},
"TrainerController.advance": {
"total": 472.4307311459998,
"count": 18200,
"self": 0.2912989609917531,
"children": {
"env_step": {
"total": 472.13943218500805,
"count": 18200,
"self": 343.22694658500427,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.64054443000404,
"count": 18200,
"self": 1.7628599710148478,
"children": {
"TorchPolicy.evaluate": {
"total": 126.8776844589892,
"count": 18200,
"self": 126.8776844589892
}
}
},
"workers": {
"total": 0.27194116999973517,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 480.6712678489989,
"count": 18200,
"is_parallel": true,
"self": 229.9321487420012,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005948643000010634,
"count": 1,
"is_parallel": true,
"self": 0.0035892420000038783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023594010000067556,
"count": 10,
"is_parallel": true,
"self": 0.0023594010000067556
}
}
},
"UnityEnvironment.step": {
"total": 0.036382700999979534,
"count": 1,
"is_parallel": true,
"self": 0.0004009559999644807,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032798600000205624,
"count": 1,
"is_parallel": true,
"self": 0.00032798600000205624
},
"communicator.exchange": {
"total": 0.0338084890000232,
"count": 1,
"is_parallel": true,
"self": 0.0338084890000232
},
"steps_from_proto": {
"total": 0.001845269999989796,
"count": 1,
"is_parallel": true,
"self": 0.0003810379999720226,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014642320000177733,
"count": 10,
"is_parallel": true,
"self": 0.0014642320000177733
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 250.73911910699772,
"count": 18199,
"is_parallel": true,
"self": 9.927345929008197,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.209604650993214,
"count": 18199,
"is_parallel": true,
"self": 5.209604650993214
},
"communicator.exchange": {
"total": 203.5562748270022,
"count": 18199,
"is_parallel": true,
"self": 203.5562748270022
},
"steps_from_proto": {
"total": 32.04589369999411,
"count": 18199,
"is_parallel": true,
"self": 6.516404288978805,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.529489411015305,
"count": 181990,
"is_parallel": true,
"self": 25.529489411015305
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012087200002497411,
"count": 1,
"self": 0.00012087200002497411,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 468.69904901198794,
"count": 438580,
"is_parallel": true,
"self": 10.828310327977022,
"children": {
"process_trajectory": {
"total": 259.14847604601084,
"count": 438580,
"is_parallel": true,
"self": 257.9896983310108,
"children": {
"RLTrainer._checkpoint": {
"total": 1.158777715000042,
"count": 4,
"is_parallel": true,
"self": 1.158777715000042
}
}
},
"_update_policy": {
"total": 198.72226263800007,
"count": 90,
"is_parallel": true,
"self": 77.26331819500149,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.45894444299859,
"count": 4587,
"is_parallel": true,
"self": 121.45894444299859
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13244152900006156,
"count": 1,
"self": 0.0008390320000444262,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13160249700001714,
"count": 1,
"self": 0.13160249700001714
}
}
}
}
}
}