{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.30965563654899597,
"min": 0.30437004566192627,
"max": 1.4625712633132935,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9413.53125,
"min": 8976.1455078125,
"max": 44368.5625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989894.0,
"min": 29995.0,
"max": 989894.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989894.0,
"min": 29995.0,
"max": 989894.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.529508113861084,
"min": -0.08502715080976486,
"max": 0.5423809289932251,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 144.5557098388672,
"min": -20.236461639404297,
"max": 151.32427978515625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007025334518402815,
"min": -0.042041316628456116,
"max": 0.4310765266418457,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.9179162979125977,
"min": -10.930742263793945,
"max": 110.78666687011719,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0680888175055941,
"min": 0.06588538287213028,
"max": 0.07471546027956347,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9532434450783174,
"min": 0.5977236822365077,
"max": 1.0556617068941705,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014555690290474987,
"min": 0.0008786138701830197,
"max": 0.02586244462694998,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20377966406664982,
"min": 0.011421980312379256,
"max": 0.3879366694042497,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.72835456677143e-06,
"min": 7.72835456677143e-06,
"max": 0.0002952171390942875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010819696393480001,
"min": 0.00010819696393480001,
"max": 0.0037579873473375996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257608571428571,
"min": 0.10257608571428571,
"max": 0.19840571250000003,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360652,
"min": 1.4360652,
"max": 2.6526624000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002673509628571429,
"min": 0.0002673509628571429,
"max": 0.00984073067875,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037429134800000007,
"min": 0.0037429134800000007,
"max": 0.12528097376,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010857143439352512,
"min": 0.010857143439352512,
"max": 0.4915505051612854,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15200001001358032,
"min": 0.15200001001358032,
"max": 3.932404041290283,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 373.8125,
"min": 336.1931818181818,
"max": 998.4230769230769,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29905.0,
"min": 16794.0,
"max": 34134.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5011624746024608,
"min": -0.9304667180686286,
"max": 1.6410727050494065,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 120.09299796819687,
"min": -30.705401696264744,
"max": 144.41439804434776,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5011624746024608,
"min": -0.9304667180686286,
"max": 1.6410727050494065,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 120.09299796819687,
"min": -30.705401696264744,
"max": 144.41439804434776,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0416686785814818,
"min": 0.03786970848027109,
"max": 10.948443429435,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.333494286518544,
"min": 3.3325343462638557,
"max": 186.123538300395,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692769339",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692771684"
},
"total": 2344.032611235,
"count": 1,
"self": 0.5454444799993325,
"children": {
"run_training.setup": {
"total": 0.04083253400017384,
"count": 1,
"self": 0.04083253400017384
},
"TrainerController.start_learning": {
"total": 2343.446334221,
"count": 1,
"self": 1.5915348500589062,
"children": {
"TrainerController._reset_env": {
"total": 4.161878548000004,
"count": 1,
"self": 4.161878548000004
},
"TrainerController.advance": {
"total": 2337.5953172739414,
"count": 63919,
"self": 1.5476113328854808,
"children": {
"env_step": {
"total": 1644.4520979660545,
"count": 63919,
"self": 1526.3325737141035,
"children": {
"SubprocessEnvManager._take_step": {
"total": 117.17676539799413,
"count": 63919,
"self": 5.210261126007026,
"children": {
"TorchPolicy.evaluate": {
"total": 111.9665042719871,
"count": 62551,
"self": 111.9665042719871
}
}
},
"workers": {
"total": 0.9427588539567751,
"count": 63919,
"self": 0.0,
"children": {
"worker_root": {
"total": 2337.704104787915,
"count": 63919,
"is_parallel": true,
"self": 936.5835461769507,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019750660001136566,
"count": 1,
"is_parallel": true,
"self": 0.0005654430001413857,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001409622999972271,
"count": 8,
"is_parallel": true,
"self": 0.001409622999972271
}
}
},
"UnityEnvironment.step": {
"total": 0.048849787000108336,
"count": 1,
"is_parallel": true,
"self": 0.0005827680001857516,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005298749999838037,
"count": 1,
"is_parallel": true,
"self": 0.0005298749999838037
},
"communicator.exchange": {
"total": 0.04567675099997359,
"count": 1,
"is_parallel": true,
"self": 0.04567675099997359
},
"steps_from_proto": {
"total": 0.002060392999965188,
"count": 1,
"is_parallel": true,
"self": 0.00037684799985981954,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016835450001053687,
"count": 8,
"is_parallel": true,
"self": 0.0016835450001053687
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1401.1205586109645,
"count": 63918,
"is_parallel": true,
"self": 36.03965733684208,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.379487401021834,
"count": 63918,
"is_parallel": true,
"self": 24.379487401021834
},
"communicator.exchange": {
"total": 1229.1609794310828,
"count": 63918,
"is_parallel": true,
"self": 1229.1609794310828
},
"steps_from_proto": {
"total": 111.54043444201784,
"count": 63918,
"is_parallel": true,
"self": 22.11733000099366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.42310444102418,
"count": 511344,
"is_parallel": true,
"self": 89.42310444102418
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 691.5956079750013,
"count": 63919,
"self": 3.032254092023777,
"children": {
"process_trajectory": {
"total": 118.87114380497587,
"count": 63919,
"self": 118.65246198897603,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21868181599984382,
"count": 2,
"self": 0.21868181599984382
}
}
},
"_update_policy": {
"total": 569.6922100780016,
"count": 459,
"self": 371.5105803019758,
"children": {
"TorchPPOOptimizer.update": {
"total": 198.18162977602583,
"count": 22773,
"self": 198.18162977602583
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.739997040014714e-07,
"count": 1,
"self": 9.739997040014714e-07
},
"TrainerController._save_models": {
"total": 0.09760257500010994,
"count": 1,
"self": 0.0014999909999460215,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09610258400016392,
"count": 1,
"self": 0.09610258400016392
}
}
}
}
}
}
}