{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9316086173057556,
"min": 0.9316086173057556,
"max": 2.8585565090179443,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8925.7421875,
"min": 8925.7421875,
"max": 29337.365234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.17990493774414,
"min": 0.23431679606437683,
"max": 12.17990493774414,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2375.08154296875,
"min": 45.45745849609375,
"max": 2449.602783203125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06943585079473753,
"min": 0.061051701901334465,
"max": 0.07670961307160848,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27774340317895013,
"min": 0.2489289443542742,
"max": 0.3835480653580424,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20496255604951988,
"min": 0.0927211333079921,
"max": 0.28168739834252526,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8198502241980795,
"min": 0.3708845332319684,
"max": 1.4084369917126263,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.59090909090909,
"min": 2.7954545454545454,
"max": 24.59090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1082.0,
"min": 123.0,
"max": 1317.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.59090909090909,
"min": 2.7954545454545454,
"max": 24.59090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1082.0,
"min": 123.0,
"max": 1317.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701184543",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701185011"
},
"total": 467.693232319,
"count": 1,
"self": 0.4398849409999457,
"children": {
"run_training.setup": {
"total": 0.06632866600000398,
"count": 1,
"self": 0.06632866600000398
},
"TrainerController.start_learning": {
"total": 467.18701871200005,
"count": 1,
"self": 0.5517395429967564,
"children": {
"TrainerController._reset_env": {
"total": 4.361346122999976,
"count": 1,
"self": 4.361346122999976
},
"TrainerController.advance": {
"total": 462.18387665200316,
"count": 18201,
"self": 0.279209562016149,
"children": {
"env_step": {
"total": 461.904667089987,
"count": 18201,
"self": 313.08652284799484,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.55162381799005,
"count": 18201,
"self": 1.4445748449797975,
"children": {
"TorchPolicy.evaluate": {
"total": 147.10704897301025,
"count": 18201,
"self": 147.10704897301025
}
}
},
"workers": {
"total": 0.26652042400212395,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 465.8924163850004,
"count": 18201,
"is_parallel": true,
"self": 228.45265180999684,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007155748000059248,
"count": 1,
"is_parallel": true,
"self": 0.004912665000233574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002243082999825674,
"count": 10,
"is_parallel": true,
"self": 0.002243082999825674
}
}
},
"UnityEnvironment.step": {
"total": 0.042881061000002774,
"count": 1,
"is_parallel": true,
"self": 0.0006833700000470344,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004387559999941004,
"count": 1,
"is_parallel": true,
"self": 0.0004387559999941004
},
"communicator.exchange": {
"total": 0.039560809000022346,
"count": 1,
"is_parallel": true,
"self": 0.039560809000022346
},
"steps_from_proto": {
"total": 0.0021981259999392933,
"count": 1,
"is_parallel": true,
"self": 0.0005424690001518684,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016556569997874249,
"count": 10,
"is_parallel": true,
"self": 0.0016556569997874249
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 237.43976457500355,
"count": 18200,
"is_parallel": true,
"self": 10.412829038987297,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.234190238999986,
"count": 18200,
"is_parallel": true,
"self": 5.234190238999986
},
"communicator.exchange": {
"total": 189.2278774840114,
"count": 18200,
"is_parallel": true,
"self": 189.2278774840114
},
"steps_from_proto": {
"total": 32.56486781300487,
"count": 18200,
"is_parallel": true,
"self": 6.065197672016211,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.499670140988655,
"count": 182000,
"is_parallel": true,
"self": 26.499670140988655
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015087000008406903,
"count": 1,
"self": 0.00015087000008406903,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 458.206574021984,
"count": 491329,
"is_parallel": true,
"self": 10.73199022700544,
"children": {
"process_trajectory": {
"total": 259.7157401799783,
"count": 491329,
"is_parallel": true,
"self": 259.2523597659781,
"children": {
"RLTrainer._checkpoint": {
"total": 0.46338041400019847,
"count": 4,
"is_parallel": true,
"self": 0.46338041400019847
}
}
},
"_update_policy": {
"total": 187.75884361500027,
"count": 90,
"is_parallel": true,
"self": 58.14999535199797,
"children": {
"TorchPPOOptimizer.update": {
"total": 129.6088482630023,
"count": 4587,
"is_parallel": true,
"self": 129.6088482630023
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08990552400007346,
"count": 1,
"self": 0.0008914830000321672,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08901404100004129,
"count": 1,
"self": 0.08901404100004129
}
}
}
}
}
}
}