{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.416517734527588,
"min": 1.416517734527588,
"max": 2.8892271518707275,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 13462.5849609375,
"min": 13462.5849609375,
"max": 29588.755859375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 8.52442741394043,
"min": 0.3737508952617645,
"max": 8.52442741394043,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1662.2633056640625,
"min": 72.50767517089844,
"max": 1734.433837890625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.028723646404008228,
"min": 0.024924018939297334,
"max": 0.034104175839456734,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.11489458561603291,
"min": 0.09969607575718933,
"max": 0.17052087919728368,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2897487441077828,
"min": 0.092327740509063,
"max": 0.30680703967809675,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.1589949764311311,
"min": 0.369310962036252,
"max": 1.5340351983904836,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.0013851600382799997,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102694,
"min": 0.102694,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.410776,
"min": 0.410776,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014443060000000012,
"min": 0.00014443060000000012,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000005,
"min": 0.0005777224000000005,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 17.25,
"min": 2.477272727272727,
"max": 17.25,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 759.0,
"min": 109.0,
"max": 936.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 17.25,
"min": 2.477272727272727,
"max": 17.25,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 759.0,
"min": 109.0,
"max": 936.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684192700",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684193068"
},
"total": 368.21984367100004,
"count": 1,
"self": 0.7460166549999485,
"children": {
"run_training.setup": {
"total": 0.051718013000026986,
"count": 1,
"self": 0.051718013000026986
},
"TrainerController.start_learning": {
"total": 367.42210900300006,
"count": 1,
"self": 0.3410052090196132,
"children": {
"TrainerController._reset_env": {
"total": 4.061940821000007,
"count": 1,
"self": 4.061940821000007
},
"TrainerController.advance": {
"total": 362.8681240579806,
"count": 18192,
"self": 0.35193981200609414,
"children": {
"env_step": {
"total": 276.2515616849794,
"count": 18192,
"self": 226.91327145297828,
"children": {
"SubprocessEnvManager._take_step": {
"total": 49.135497407984985,
"count": 18192,
"self": 1.7745759560009446,
"children": {
"TorchPolicy.evaluate": {
"total": 47.36092145198404,
"count": 18192,
"self": 47.36092145198404
}
}
},
"workers": {
"total": 0.20279282401611454,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 366.0347081649968,
"count": 18192,
"is_parallel": true,
"self": 165.46852209098483,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005550203999973746,
"count": 1,
"is_parallel": true,
"self": 0.004006019999678756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015441840002949903,
"count": 10,
"is_parallel": true,
"self": 0.0015441840002949903
}
}
},
"UnityEnvironment.step": {
"total": 0.0356861369998569,
"count": 1,
"is_parallel": true,
"self": 0.0005494699996688723,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041656500002318353,
"count": 1,
"is_parallel": true,
"self": 0.00041656500002318353
},
"communicator.exchange": {
"total": 0.032838445000152205,
"count": 1,
"is_parallel": true,
"self": 0.032838445000152205
},
"steps_from_proto": {
"total": 0.0018816570000126376,
"count": 1,
"is_parallel": true,
"self": 0.0003467420003744337,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015349149996382039,
"count": 10,
"is_parallel": true,
"self": 0.0015349149996382039
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 200.56618607401197,
"count": 18191,
"is_parallel": true,
"self": 9.558649354999716,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.1764665559971945,
"count": 18191,
"is_parallel": true,
"self": 5.1764665559971945
},
"communicator.exchange": {
"total": 154.9135499059987,
"count": 18191,
"is_parallel": true,
"self": 154.9135499059987
},
"steps_from_proto": {
"total": 30.91752025701635,
"count": 18191,
"is_parallel": true,
"self": 5.641797326984488,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.27572293003186,
"count": 181910,
"is_parallel": true,
"self": 25.27572293003186
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 86.26462256099512,
"count": 18192,
"self": 0.41451989099709863,
"children": {
"process_trajectory": {
"total": 22.977643459998262,
"count": 18192,
"self": 22.302249830998335,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6753936289999274,
"count": 4,
"self": 0.6753936289999274
}
}
},
"_update_policy": {
"total": 62.87245920999976,
"count": 90,
"self": 49.08712644800039,
"children": {
"TorchPPOOptimizer.update": {
"total": 13.78533276199937,
"count": 1080,
"self": 13.78533276199937
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.770000476943096e-07,
"count": 1,
"self": 8.770000476943096e-07
},
"TrainerController._save_models": {
"total": 0.15103803799979687,
"count": 1,
"self": 0.0007832219996544154,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15025481600014245,
"count": 1,
"self": 0.15025481600014245
}
}
}
}
}
}
}