{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.892038881778717,
"min": 0.892038881778717,
"max": 2.853081703186035,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8517.1875,
"min": 8517.1875,
"max": 29218.41015625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.168224334716797,
"min": 0.4111616909503937,
"max": 13.184819221496582,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2567.8037109375,
"min": 79.76536560058594,
"max": 2689.703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06960944524334456,
"min": 0.06563399338242562,
"max": 0.07279902097272606,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27843778097337823,
"min": 0.2625359735297025,
"max": 0.3639951048636303,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17255926596767762,
"min": 0.12072218996404176,
"max": 0.2944142708883566,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6902370638707105,
"min": 0.482888759856167,
"max": 1.2868026956039316,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000009e-06,
"min": 8.082097306000009e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.2328389224000035e-05,
"min": 3.2328389224000035e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269399999999998,
"min": 0.10269399999999998,
"max": 0.197294,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4107759999999999,
"min": 0.4107759999999999,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.004864970599999999,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.59090909090909,
"min": 3.2045454545454546,
"max": 25.89090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1126.0,
"min": 141.0,
"max": 1424.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.59090909090909,
"min": 3.2045454545454546,
"max": 25.89090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1126.0,
"min": 141.0,
"max": 1424.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677214323",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677214831"
},
"total": 507.8725578900003,
"count": 1,
"self": 0.3858022100002927,
"children": {
"run_training.setup": {
"total": 0.11497642699987409,
"count": 1,
"self": 0.11497642699987409
},
"TrainerController.start_learning": {
"total": 507.3717792530001,
"count": 1,
"self": 0.505777959995612,
"children": {
"TrainerController._reset_env": {
"total": 9.831027076999817,
"count": 1,
"self": 9.831027076999817
},
"TrainerController.advance": {
"total": 496.92010923600446,
"count": 18200,
"self": 0.2607775159749508,
"children": {
"env_step": {
"total": 496.6593317200295,
"count": 18200,
"self": 361.89644599901067,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.50254242001256,
"count": 18200,
"self": 1.4338672299713835,
"children": {
"TorchPolicy.evaluate": {
"total": 133.06867519004118,
"count": 18200,
"self": 29.480927512062863,
"children": {
"TorchPolicy.sample_actions": {
"total": 103.58774767797831,
"count": 18200,
"self": 103.58774767797831
}
}
}
}
},
"workers": {
"total": 0.2603433010062872,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 505.83003653701917,
"count": 18200,
"is_parallel": true,
"self": 275.3312124290683,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008388594000280136,
"count": 1,
"is_parallel": true,
"self": 0.0045375670010798785,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0038510269992002577,
"count": 10,
"is_parallel": true,
"self": 0.0038510269992002577
}
}
},
"UnityEnvironment.step": {
"total": 0.04066075600030672,
"count": 1,
"is_parallel": true,
"self": 0.0004776020000463177,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003173640002387401,
"count": 1,
"is_parallel": true,
"self": 0.0003173640002387401
},
"communicator.exchange": {
"total": 0.03801045399995928,
"count": 1,
"is_parallel": true,
"self": 0.03801045399995928
},
"steps_from_proto": {
"total": 0.0018553360000623798,
"count": 1,
"is_parallel": true,
"self": 0.00042897699950117385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001426359000561206,
"count": 10,
"is_parallel": true,
"self": 0.001426359000561206
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 230.4988241079509,
"count": 18199,
"is_parallel": true,
"self": 9.344617819952418,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.259446912963085,
"count": 18199,
"is_parallel": true,
"self": 5.259446912963085
},
"communicator.exchange": {
"total": 186.04022162900765,
"count": 18199,
"is_parallel": true,
"self": 186.04022162900765
},
"steps_from_proto": {
"total": 29.854537746027745,
"count": 18199,
"is_parallel": true,
"self": 6.525459191944719,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.329078554083026,
"count": 181990,
"is_parallel": true,
"self": 23.329078554083026
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.631800018411013e-05,
"count": 1,
"self": 3.631800018411013e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 493.7006018827956,
"count": 389016,
"is_parallel": true,
"self": 9.577440201870559,
"children": {
"process_trajectory": {
"total": 248.25652972392572,
"count": 389016,
"is_parallel": true,
"self": 247.52132248892622,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7352072349995069,
"count": 4,
"is_parallel": true,
"self": 0.7352072349995069
}
}
},
"_update_policy": {
"total": 235.8666319569993,
"count": 90,
"is_parallel": true,
"self": 94.20312565501308,
"children": {
"TorchPPOOptimizer.update": {
"total": 141.66350630198622,
"count": 7645,
"is_parallel": true,
"self": 141.66350630198622
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11482866200003627,
"count": 1,
"self": 0.0009673330000623537,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11386132899997392,
"count": 1,
"self": 0.11386132899997392
}
}
}
}
}
}
}