{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8518326282501221,
"min": 0.8518326282501221,
"max": 2.8495330810546875,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8161.408203125,
"min": 8161.408203125,
"max": 29370.13671875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.770134925842285,
"min": 0.36448028683662415,
"max": 12.770134925842285,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2490.17626953125,
"min": 70.70917510986328,
"max": 2587.640380859375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06467329547826119,
"min": 0.06275828136716097,
"max": 0.07470308090126435,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25869318191304475,
"min": 0.25869318191304475,
"max": 0.3735154045063217,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.23782308626116494,
"min": 0.12376709421868345,
"max": 0.2997552422076172,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9512923450446598,
"min": 0.4950683768747338,
"max": 1.4987762110380858,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.886363636363637,
"min": 3.340909090909091,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1095.0,
"min": 147.0,
"max": 1365.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.886363636363637,
"min": 3.340909090909091,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1095.0,
"min": 147.0,
"max": 1365.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682525503",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682525967"
},
"total": 464.595271684,
"count": 1,
"self": 0.3851814220000733,
"children": {
"run_training.setup": {
"total": 0.11561458099998845,
"count": 1,
"self": 0.11561458099998845
},
"TrainerController.start_learning": {
"total": 464.09447568099995,
"count": 1,
"self": 0.6034153600101604,
"children": {
"TrainerController._reset_env": {
"total": 3.9530402440000216,
"count": 1,
"self": 3.9530402440000216
},
"TrainerController.advance": {
"total": 459.4018985789897,
"count": 18206,
"self": 0.3013461399892208,
"children": {
"env_step": {
"total": 459.10055243900047,
"count": 18206,
"self": 338.3119624399976,
"children": {
"SubprocessEnvManager._take_step": {
"total": 120.50113802399972,
"count": 18206,
"self": 1.8572762569957035,
"children": {
"TorchPolicy.evaluate": {
"total": 118.64386176700401,
"count": 18206,
"self": 118.64386176700401
}
}
},
"workers": {
"total": 0.2874519750031368,
"count": 18206,
"self": 0.0,
"children": {
"worker_root": {
"total": 462.6245877310007,
"count": 18206,
"is_parallel": true,
"self": 212.07206727600686,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004919147999999041,
"count": 1,
"is_parallel": true,
"self": 0.003460539999991852,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014586080000071888,
"count": 10,
"is_parallel": true,
"self": 0.0014586080000071888
}
}
},
"UnityEnvironment.step": {
"total": 0.034579640999993444,
"count": 1,
"is_parallel": true,
"self": 0.0005512349999889921,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042765799997823706,
"count": 1,
"is_parallel": true,
"self": 0.00042765799997823706
},
"communicator.exchange": {
"total": 0.031682203999992,
"count": 1,
"is_parallel": true,
"self": 0.031682203999992
},
"steps_from_proto": {
"total": 0.0019185440000342169,
"count": 1,
"is_parallel": true,
"self": 0.00037958399991566694,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00153896000011855,
"count": 10,
"is_parallel": true,
"self": 0.00153896000011855
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 250.55252045499384,
"count": 18205,
"is_parallel": true,
"self": 9.9955147849804,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.4810595440028465,
"count": 18205,
"is_parallel": true,
"self": 5.4810595440028465
},
"communicator.exchange": {
"total": 203.26306820100746,
"count": 18205,
"is_parallel": true,
"self": 203.26306820100746
},
"steps_from_proto": {
"total": 31.812877925003136,
"count": 18205,
"is_parallel": true,
"self": 6.32611113402811,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.486766790975025,
"count": 182050,
"is_parallel": true,
"self": 25.486766790975025
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001590530000612489,
"count": 1,
"self": 0.0001590530000612489,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 456.08622128101143,
"count": 405122,
"is_parallel": true,
"self": 9.729415566024159,
"children": {
"process_trajectory": {
"total": 250.28537865798722,
"count": 405122,
"is_parallel": true,
"self": 249.08417568298722,
"children": {
"RLTrainer._checkpoint": {
"total": 1.201202975000001,
"count": 4,
"is_parallel": true,
"self": 1.201202975000001
}
}
},
"_update_policy": {
"total": 196.07142705700005,
"count": 90,
"is_parallel": true,
"self": 76.06520485300388,
"children": {
"TorchPPOOptimizer.update": {
"total": 120.00622220399617,
"count": 4584,
"is_parallel": true,
"self": 120.00622220399617
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1359624450000183,
"count": 1,
"self": 0.0009396799999876748,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13502276500003063,
"count": 1,
"self": 0.13502276500003063
}
}
}
}
}
}
}