{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.6815638542175293,
"min": 1.6815638542175293,
"max": 2.8854310512542725,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 14834.755859375,
"min": 14834.755859375,
"max": 29613.177734375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.06078052520752,
"min": 0.049542609602212906,
"max": 9.06078052520752,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1766.852294921875,
"min": 9.611266136169434,
"max": 1798.76171875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 19.636363636363637,
"min": 2.3181818181818183,
"max": 19.78181818181818,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 864.0,
"min": 102.0,
"max": 1088.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 19.636363636363637,
"min": 2.3181818181818183,
"max": 19.78181818181818,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 864.0,
"min": 102.0,
"max": 1088.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.047902557631268314,
"min": 0.04262349455593881,
"max": 0.0540752234779862,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.047902557631268314,
"min": 0.04312242110506958,
"max": 0.1081504469559724,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.25993957880296203,
"min": 0.07573003040855392,
"max": 0.2956721667657819,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.25993957880296203,
"min": 0.07573003040855392,
"max": 0.5913443335315638,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.4880975039999955e-06,
"min": 7.4880975039999955e-06,
"max": 0.000292344002552,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 7.4880975039999955e-06,
"min": 7.4880975039999955e-06,
"max": 0.0005617200127599999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10249600000000002,
"min": 0.10249600000000002,
"max": 0.19744800000000004,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10249600000000002,
"min": 0.10249600000000002,
"max": 0.38724000000000003,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001345503999999999,
"min": 0.0001345503999999999,
"max": 0.004872655200000001,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0001345503999999999,
"min": 0.0001345503999999999,
"max": 0.009363275999999999,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700915081",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700915554"
},
"total": 472.33532615700005,
"count": 1,
"self": 0.7413209720002669,
"children": {
"run_training.setup": {
"total": 0.04883286799986308,
"count": 1,
"self": 0.04883286799986308
},
"TrainerController.start_learning": {
"total": 471.5451723169999,
"count": 1,
"self": 0.5414443469908292,
"children": {
"TrainerController._reset_env": {
"total": 10.355672959000003,
"count": 1,
"self": 10.355672959000003
},
"TrainerController.advance": {
"total": 460.5192671700088,
"count": 18206,
"self": 0.24918094303075122,
"children": {
"env_step": {
"total": 460.27008622697804,
"count": 18206,
"self": 315.7945250790581,
"children": {
"SubprocessEnvManager._take_step": {
"total": 144.20187262593709,
"count": 18206,
"self": 1.4519299418675473,
"children": {
"TorchPolicy.evaluate": {
"total": 142.74994268406954,
"count": 18206,
"self": 142.74994268406954
}
}
},
"workers": {
"total": 0.2736885219828764,
"count": 18206,
"self": 0.0,
"children": {
"worker_root": {
"total": 470.25841545399726,
"count": 18206,
"is_parallel": true,
"self": 226.32917361900945,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005292112999995879,
"count": 1,
"is_parallel": true,
"self": 0.0037539209993155964,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001538192000680283,
"count": 10,
"is_parallel": true,
"self": 0.001538192000680283
}
}
},
"UnityEnvironment.step": {
"total": 0.03609083000037572,
"count": 1,
"is_parallel": true,
"self": 0.0007031790000837645,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042748700025185826,
"count": 1,
"is_parallel": true,
"self": 0.00042748700025185826
},
"communicator.exchange": {
"total": 0.03240633599989451,
"count": 1,
"is_parallel": true,
"self": 0.03240633599989451
},
"steps_from_proto": {
"total": 0.002553828000145586,
"count": 1,
"is_parallel": true,
"self": 0.0008648120001453208,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016890160000002652,
"count": 10,
"is_parallel": true,
"self": 0.0016890160000002652
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 243.9292418349878,
"count": 18205,
"is_parallel": true,
"self": 10.818593935018725,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.4774415170231805,
"count": 18205,
"is_parallel": true,
"self": 5.4774415170231805
},
"communicator.exchange": {
"total": 193.55020062596213,
"count": 18205,
"is_parallel": true,
"self": 193.55020062596213
},
"steps_from_proto": {
"total": 34.083005756983766,
"count": 18205,
"is_parallel": true,
"self": 6.4222211560131655,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.6607846009706,
"count": 182050,
"is_parallel": true,
"self": 27.6607846009706
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013053300017418223,
"count": 1,
"self": 0.00013053300017418223,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 455.58387632192307,
"count": 598558,
"is_parallel": true,
"self": 12.707212030862593,
"children": {
"process_trajectory": {
"total": 315.7851240030609,
"count": 598558,
"is_parallel": true,
"self": 314.9768425170605,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8082814860003964,
"count": 4,
"is_parallel": true,
"self": 0.8082814860003964
}
}
},
"_update_policy": {
"total": 127.09154028799958,
"count": 38,
"is_parallel": true,
"self": 60.24530387299501,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.84623641500457,
"count": 2202,
"is_parallel": true,
"self": 66.84623641500457
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12865730800012898,
"count": 1,
"self": 0.0011294659998384304,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12752784200029055,
"count": 1,
"self": 0.12752784200029055
}
}
}
}
}
}
}