{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.08340322971344,
"min": 1.08340322971344,
"max": 2.8826873302459717,
"count": 39
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10427.755859375,
"min": 10329.1357421875,
"max": 29489.890625,
"count": 39
},
"SnowballTarget.Step.mean": {
"value": 389976.0,
"min": 9952.0,
"max": 389976.0,
"count": 39
},
"SnowballTarget.Step.sum": {
"value": 389976.0,
"min": 9952.0,
"max": 389976.0,
"count": 39
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.508840560913086,
"min": 0.17293517291545868,
"max": 13.508840560913086,
"count": 39
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2769.312255859375,
"min": 33.54942321777344,
"max": 2769.312255859375,
"count": 39
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 39
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 39
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.472727272727273,
"min": 2.6818181818181817,
"max": 27.163636363636364,
"count": 39
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1456.0,
"min": 118.0,
"max": 1494.0,
"count": 39
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.472727272727273,
"min": 2.6818181818181817,
"max": 27.163636363636364,
"count": 39
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1456.0,
"min": 118.0,
"max": 1494.0,
"count": 39
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04579806241337351,
"min": 0.042137691017890735,
"max": 0.05587920360339922,
"count": 39
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.09159612482674702,
"min": 0.04441482705557671,
"max": 0.11175840720679844,
"count": 39
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21136931223851263,
"min": 0.08879113964962237,
"max": 0.3049295004118573,
"count": 39
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.42273862447702526,
"min": 0.08879113964962237,
"max": 0.6098590008237146,
"count": 39
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.881527706159999e-05,
"min": 6.881527706159999e-05,
"max": 0.0002965152011615999,
"count": 39
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00013763055412319998,
"min": 8.705767098079999e-05,
"max": 0.0005836224054592,
"count": 39
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.12293840000000002,
"min": 0.12293840000000002,
"max": 0.19883840000000005,
"count": 39
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.24587680000000003,
"min": 0.1290192,
"max": 0.39454080000000014,
"count": 39
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0011546261599999998,
"min": 0.0011546261599999998,
"max": 0.004942036160000001,
"count": 39
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0023092523199999995,
"min": 0.0014580580800000003,
"max": 0.00972758592,
"count": 39
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 39
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 39
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693828000",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693828912"
},
"total": 911.980597828,
"count": 1,
"self": 0.005914645000075325,
"children": {
"run_training.setup": {
"total": 0.04278831099998115,
"count": 1,
"self": 0.04278831099998115
},
"TrainerController.start_learning": {
"total": 911.9318948719999,
"count": 1,
"self": 2.043120440011876,
"children": {
"TrainerController._reset_env": {
"total": 4.138921319000019,
"count": 1,
"self": 4.138921319000019
},
"TrainerController.advance": {
"total": 905.5604848669878,
"count": 35799,
"self": 0.5655543749762728,
"children": {
"env_step": {
"total": 904.9949304920116,
"count": 35799,
"self": 658.5273145520051,
"children": {
"SubprocessEnvManager._take_step": {
"total": 245.9122390499872,
"count": 35799,
"self": 3.6151985529808144,
"children": {
"TorchPolicy.evaluate": {
"total": 242.29704049700638,
"count": 35799,
"self": 242.29704049700638
}
}
},
"workers": {
"total": 0.5553768900192608,
"count": 35798,
"self": 0.0,
"children": {
"worker_root": {
"total": 907.6885345479939,
"count": 35798,
"is_parallel": true,
"self": 416.8862383749789,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002251466999950935,
"count": 1,
"is_parallel": true,
"self": 0.0006050779996940037,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016463890002569315,
"count": 10,
"is_parallel": true,
"self": 0.0016463890002569315
}
}
},
"UnityEnvironment.step": {
"total": 0.04990317800002231,
"count": 1,
"is_parallel": true,
"self": 0.0004010219998917819,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003591890000507192,
"count": 1,
"is_parallel": true,
"self": 0.0003591890000507192
},
"communicator.exchange": {
"total": 0.04732117700007166,
"count": 1,
"is_parallel": true,
"self": 0.04732117700007166
},
"steps_from_proto": {
"total": 0.00182179000000815,
"count": 1,
"is_parallel": true,
"self": 0.000400251000201024,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001421538999807126,
"count": 10,
"is_parallel": true,
"self": 0.001421538999807126
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 490.80229617301495,
"count": 35797,
"is_parallel": true,
"self": 20.747673110991173,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.440433202006375,
"count": 35797,
"is_parallel": true,
"self": 10.440433202006375
},
"communicator.exchange": {
"total": 389.1967887590297,
"count": 35797,
"is_parallel": true,
"self": 389.1967887590297
},
"steps_from_proto": {
"total": 70.41740110098772,
"count": 35797,
"is_parallel": true,
"self": 12.805089449956995,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.61231165103072,
"count": 357970,
"is_parallel": true,
"self": 57.61231165103072
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.116200006545114e-05,
"count": 1,
"self": 6.116200006545114e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 896.3614350979441,
"count": 1047177,
"is_parallel": true,
"self": 22.711233324866043,
"children": {
"process_trajectory": {
"total": 597.0070591190785,
"count": 1047178,
"is_parallel": true,
"self": 594.2882468050781,
"children": {
"RLTrainer._checkpoint": {
"total": 2.718812314000388,
"count": 7,
"is_parallel": true,
"self": 2.718812314000388
}
}
},
"_update_policy": {
"total": 276.6431426539996,
"count": 71,
"is_parallel": true,
"self": 148.56175756900097,
"children": {
"TorchPPOOptimizer.update": {
"total": 128.0813850849986,
"count": 4459,
"is_parallel": true,
"self": 128.0813850849986
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.18930708400012008,
"count": 1,
"self": 0.001591046000157803,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18771603799996228,
"count": 1,
"self": 0.18771603799996228
}
}
}
}
}
}
}