{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5073500871658325,
"min": 0.44612881541252136,
"max": 2.8709447383880615,
"count": 200
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5218.095703125,
"min": 4313.69921875,
"max": 29432.92578125,
"count": 200
},
"SnowballTarget.Step.mean": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Step.sum": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.333270072937012,
"min": 0.2767333686351776,
"max": 14.410740852355957,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2938.3203125,
"min": 53.68627166748047,
"max": 2952.8720703125,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07231564274441232,
"min": 0.06043865533295092,
"max": 0.07629179319334856,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3615782137220616,
"min": 0.24175462133180367,
"max": 0.3814589659667428,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.14176157613595325,
"min": 0.11787958366239407,
"max": 0.2759304280374565,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7088078806797663,
"min": 0.47151833464957627,
"max": 1.3280392432329702,
"count": 200
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032997656000039e-07,
"min": 7.032997656000039e-07,
"max": 0.00029918820027059994,
"count": 200
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.5164988280000196e-06,
"min": 3.5164988280000196e-06,
"max": 0.0014885160038279998,
"count": 200
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10023440000000002,
"min": 0.10023440000000002,
"max": 0.1997294,
"count": 200
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5011720000000001,
"min": 0.4029176,
"max": 0.996172,
"count": 200
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.1696560000000067e-05,
"min": 2.1696560000000067e-05,
"max": 0.004986497059999999,
"count": 200
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00010848280000000034,
"min": 0.00010848280000000034,
"max": 0.024808982800000004,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.036363636363635,
"min": 3.2045454545454546,
"max": 28.545454545454547,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1542.0,
"min": 141.0,
"max": 1562.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.036363636363635,
"min": 3.2045454545454546,
"max": 28.545454545454547,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1542.0,
"min": 141.0,
"max": 1562.0,
"count": 200
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680947605",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680953821"
},
"total": 6216.0523109509995,
"count": 1,
"self": 0.6432689249995747,
"children": {
"run_training.setup": {
"total": 0.13959167000007255,
"count": 1,
"self": 0.13959167000007255
},
"TrainerController.start_learning": {
"total": 6215.269450356,
"count": 1,
"self": 9.836011857989433,
"children": {
"TrainerController._reset_env": {
"total": 0.8482601200000772,
"count": 1,
"self": 0.8482601200000772
},
"TrainerController.advance": {
"total": 6204.421568810009,
"count": 181873,
"self": 5.122590321077041,
"children": {
"env_step": {
"total": 6199.298978488932,
"count": 181873,
"self": 5046.155034286932,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1148.3035975169971,
"count": 181873,
"self": 29.348385014975065,
"children": {
"TorchPolicy.evaluate": {
"total": 1118.955212502022,
"count": 181873,
"self": 1118.955212502022
}
}
},
"workers": {
"total": 4.840346685002714,
"count": 181873,
"self": 0.0,
"children": {
"worker_root": {
"total": 6191.093568429078,
"count": 181873,
"is_parallel": true,
"self": 2630.802529453923,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0033474710000973573,
"count": 1,
"is_parallel": true,
"self": 0.0011056989999360667,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022417720001612906,
"count": 10,
"is_parallel": true,
"self": 0.0022417720001612906
}
}
},
"UnityEnvironment.step": {
"total": 0.04783905599992977,
"count": 1,
"is_parallel": true,
"self": 0.0007138229998417955,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003933509999569651,
"count": 1,
"is_parallel": true,
"self": 0.0003933509999569651
},
"communicator.exchange": {
"total": 0.04408974600005422,
"count": 1,
"is_parallel": true,
"self": 0.04408974600005422
},
"steps_from_proto": {
"total": 0.002642136000076789,
"count": 1,
"is_parallel": true,
"self": 0.0005435770003714424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020985589997053467,
"count": 10,
"is_parallel": true,
"self": 0.0020985589997053467
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3560.291038975155,
"count": 181872,
"is_parallel": true,
"self": 146.50199550708567,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.8901836070695,
"count": 181872,
"is_parallel": true,
"self": 76.8901836070695
},
"communicator.exchange": {
"total": 2873.9393243660525,
"count": 181872,
"is_parallel": true,
"self": 2873.9393243660525
},
"steps_from_proto": {
"total": 462.95953549494766,
"count": 181872,
"is_parallel": true,
"self": 96.8290968019362,
"children": {
"_process_rank_one_or_two_observation": {
"total": 366.13043869301146,
"count": 1818720,
"is_parallel": true,
"self": 366.13043869301146
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0007093419999364414,
"count": 1,
"self": 0.0007093419999364414,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 6141.456610045923,
"count": 7205549,
"is_parallel": true,
"self": 176.47311314596482,
"children": {
"process_trajectory": {
"total": 3298.8974320339503,
"count": 7205549,
"is_parallel": true,
"self": 3285.0486143369544,
"children": {
"RLTrainer._checkpoint": {
"total": 13.848817696996093,
"count": 40,
"is_parallel": true,
"self": 13.848817696996093
}
}
},
"_update_policy": {
"total": 2666.086064866008,
"count": 909,
"is_parallel": true,
"self": 891.3026456340563,
"children": {
"TorchPPOOptimizer.update": {
"total": 1774.783419231952,
"count": 46353,
"is_parallel": true,
"self": 1774.783419231952
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.16290022600060183,
"count": 1,
"self": 0.0013970910003990866,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16150313500020275,
"count": 1,
"self": 0.16150313500020275
}
}
}
}
}
}
}