{
  "name": "root",
  "gauges": {
    "Pyramids.Policy.Entropy.mean": {
      "value": 0.5790771245956421,
      "min": 0.5460456609725952,
      "max": 1.4837881326675415,
      "count": 33
    },
    "Pyramids.Policy.Entropy.sum": {
      "value": 17196.2734375,
      "min": 16617.26171875,
      "max": 45012.1953125,
      "count": 33
    },
    "Pyramids.Step.mean": {
      "value": 989889.0,
      "min": 29902.0,
      "max": 989889.0,
      "count": 33
    },
    "Pyramids.Step.sum": {
      "value": 989889.0,
      "min": 29902.0,
      "max": 989889.0,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
      "value": 0.18456396460533142,
      "min": -0.09519436955451965,
      "max": 0.19893212616443634,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
      "value": 46.32555389404297,
      "min": -22.846649169921875,
      "max": 50.727691650390625,
      "count": 33
    },
    "Pyramids.Policy.RndValueEstimate.mean": {
      "value": 0.29602763056755066,
      "min": -0.017827358096837997,
      "max": 0.34487876296043396,
      "count": 33
    },
    "Pyramids.Policy.RndValueEstimate.sum": {
      "value": 74.30293273925781,
      "min": -4.545976161956787,
      "max": 81.73626708984375,
      "count": 33
    },
    "Pyramids.Losses.PolicyLoss.mean": {
      "value": 0.07008602623301115,
      "min": 0.06463045386287074,
      "max": 0.07240585912022643,
      "count": 33
    },
    "Pyramids.Losses.PolicyLoss.sum": {
      "value": 0.981204367262156,
      "min": 0.5043134090712096,
      "max": 1.0231486340321592,
      "count": 33
    },
    "Pyramids.Losses.ValueLoss.mean": {
      "value": 0.03730504111237159,
      "min": 0.00013656261948400363,
      "max": 0.03730504111237159,
      "count": 33
    },
    "Pyramids.Losses.ValueLoss.sum": {
      "value": 0.5222705755732022,
      "min": 0.0016387514338080437,
      "max": 0.5222705755732022,
      "count": 33
    },
    "Pyramids.Policy.LearningRate.mean": {
      "value": 7.585833185707144e-06,
      "min": 7.585833185707144e-06,
      "max": 0.00029523505873117143,
      "count": 33
    },
    "Pyramids.Policy.LearningRate.sum": {
      "value": 0.00010620166459990001,
      "min": 0.00010620166459990001,
      "max": 0.0033822233725923,
      "count": 33
    },
    "Pyramids.Policy.Epsilon.mean": {
      "value": 0.10252857857142858,
      "min": 0.10252857857142858,
      "max": 0.19841168571428572,
      "count": 33
    },
    "Pyramids.Policy.Epsilon.sum": {
      "value": 1.4354001,
      "min": 1.3888818,
      "max": 2.5274077000000004,
      "count": 33
    },
    "Pyramids.Policy.Beta.mean": {
      "value": 0.0002626049992857143,
      "min": 0.0002626049992857143,
      "max": 0.009841327402857142,
      "count": 33
    },
    "Pyramids.Policy.Beta.sum": {
      "value": 0.0036764699900000003,
      "min": 0.0036764699900000003,
      "max": 0.11276802923000001,
      "count": 33
    },
    "Pyramids.Losses.RNDLoss.mean": {
      "value": 0.012705421075224876,
      "min": 0.012705421075224876,
      "max": 0.4501160681247711,
      "count": 33
    },
    "Pyramids.Losses.RNDLoss.sum": {
      "value": 0.17787589132785797,
      "min": 0.17787589132785797,
      "max": 3.1508123874664307,
      "count": 33
    },
    "Pyramids.Environment.EpisodeLength.mean": {
      "value": 676.5348837209302,
      "min": 620.625,
      "max": 999.0,
      "count": 33
    },
    "Pyramids.Environment.EpisodeLength.sum": {
      "value": 29091.0,
      "min": 16701.0,
      "max": 33082.0,
      "count": 33
    },
    "Pyramids.Environment.CumulativeReward.mean": {
      "value": 0.7186139212098233,
      "min": -0.9999750521965325,
      "max": 0.8408652673266372,
      "count": 33
    },
    "Pyramids.Environment.CumulativeReward.sum": {
      "value": 30.9003986120224,
      "min": -31.99920167028904,
      "max": 41.20239809900522,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicReward.mean": {
      "value": 0.7186139212098233,
      "min": -0.9999750521965325,
      "max": 0.8408652673266372,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicReward.sum": {
      "value": 30.9003986120224,
      "min": -31.99920167028904,
      "max": 41.20239809900522,
      "count": 33
    },
    "Pyramids.Policy.RndReward.mean": {
      "value": 0.08839131033773599,
      "min": 0.0851667387926552,
      "max": 8.483155116438866,
      "count": 33
    },
    "Pyramids.Policy.RndReward.sum": {
      "value": 3.8008263445226476,
      "min": 3.8008263445226476,
      "max": 144.21363697946072,
      "count": 33
    },
    "Pyramids.IsTraining.mean": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 33
    },
    "Pyramids.IsTraining.sum": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 33
    }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1700829735",
    "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
    "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
    "mlagents_version": "1.1.0.dev0",
    "mlagents_envs_version": "1.1.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "2.1.0+cu118",
    "numpy_version": "1.23.5",
    "end_time_seconds": "1700831901"
  },
  "total": 2166.9000152040003,
  "count": 1,
  "self": 0.4767642159999923,
  "children": {
    "run_training.setup": {
      "total": 0.06226442700017287,
      "count": 1,
      "self": 0.06226442700017287
    },
    "TrainerController.start_learning": {
      "total": 2166.360986561,
      "count": 1,
      "self": 1.3725219229031609,
      "children": {
        "TrainerController._reset_env": {
          "total": 5.033508770000026,
          "count": 1,
          "self": 5.033508770000026
        },
        "TrainerController.advance": {
          "total": 2159.880578041097,
          "count": 63263,
          "self": 1.4989307491105137,
          "children": {
            "env_step": {
              "total": 1515.3621111159932,
              "count": 63263,
              "self": 1381.5920559139786,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 132.91053991303716,
                  "count": 63263,
                  "self": 4.70214460709667,
                  "children": {
                    "TorchPolicy.evaluate": {
                      "total": 128.20839530594048,
                      "count": 62562,
                      "self": 128.20839530594048
                    }
                  }
                },
                "workers": {
                  "total": 0.8595152889774909,
                  "count": 63263,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 2161.7624180109606,
                      "count": 63263,
                      "is_parallel": true,
                      "self": 898.9705250040151,
                      "children": {
                        "run_training.setup": {
                          "total": 0.0,
                          "count": 0,
                          "is_parallel": true,
                          "self": 0.0,
                          "children": {
                            "steps_from_proto": {
                              "total": 0.0029475050000655756,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.000796342999819899,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 0.0021511620002456766,
                                  "count": 8,
                                  "is_parallel": true,
                                  "self": 0.0021511620002456766
                                }
                              }
                            },
                            "UnityEnvironment.step": {
                              "total": 0.04763833000015438,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.0005701320001207932,
                              "children": {
                                "UnityEnvironment._generate_step_input": {
                                  "total": 0.0005324570001903339,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.0005324570001903339
                                },
                                "communicator.exchange": {
                                  "total": 0.044864776999929745,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.044864776999929745
                                },
                                "steps_from_proto": {
                                  "total": 0.0016709639999135106,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.0003838790012196114,
                                  "children": {
                                    "_process_rank_one_or_two_observation": {
                                      "total": 0.0012870849986938993,
                                      "count": 8,
                                      "is_parallel": true,
                                      "self": 0.0012870849986938993
                                    }
                                  }
                                }
                              }
                            }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 1262.7918930069454,
                          "count": 63262,
                          "is_parallel": true,
                          "self": 34.155355731905274,
                          "children": {
                            "UnityEnvironment._generate_step_input": {
                              "total": 24.218638860045303,
                              "count": 63262,
                              "is_parallel": true,
                              "self": 24.218638860045303
                            },
                            "communicator.exchange": {
                              "total": 1107.5767764639413,
                              "count": 63262,
                              "is_parallel": true,
                              "self": 1107.5767764639413
                            },
                            "steps_from_proto": {
                              "total": 96.84112195105354,
                              "count": 63262,
                              "is_parallel": true,
                              "self": 19.45692200300209,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 77.38419994805145,
                                  "count": 506096,
                                  "is_parallel": true,
                                  "self": 77.38419994805145
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 643.0195361759934,
              "count": 63263,
              "self": 2.5519632150012512,
              "children": {
                "process_trajectory": {
                  "total": 125.12820541598694,
                  "count": 63263,
                  "self": 124.96889238898575,
                  "children": {
                    "RLTrainer._checkpoint": {
                      "total": 0.15931302700118977,
                      "count": 2,
                      "self": 0.15931302700118977
                    }
                  }
                },
                "_update_policy": {
                  "total": 515.3393675450052,
                  "count": 446,
                  "self": 310.4269578010426,
                  "children": {
                    "TorchPPOOptimizer.update": {
                      "total": 204.9124097439626,
                      "count": 22758,
                      "self": 204.9124097439626
                    }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": {
          "total": 9.669993232819252e-07,
          "count": 1,
          "self": 9.669993232819252e-07
        },
        "TrainerController._save_models": {
          "total": 0.07437686000048416,
          "count": 1,
          "self": 0.001315127000452776,
          "children": {
            "RLTrainer._checkpoint": {
              "total": 0.07306173300003138,
              "count": 1,
              "self": 0.07306173300003138
            }
          }
        }
      }
    }
  }
}