{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.48691362142562866,
"min": 0.4835819900035858,
"max": 1.5011435747146606,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14693.10546875,
"min": 14406.875,
"max": 45538.69140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989913.0,
"min": 29952.0,
"max": 989913.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989913.0,
"min": 29952.0,
"max": 989913.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3981753885746002,
"min": -0.09427683800458908,
"max": 0.4258575439453125,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 106.31282806396484,
"min": -22.81499481201172,
"max": 114.55567932128906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0051437499932944775,
"min": 0.0051437499932944775,
"max": 0.31874677538871765,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.37338125705719,
"min": 1.37338125705719,
"max": 75.54298400878906,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06720263277834636,
"min": 0.06340403685836853,
"max": 0.07331536769368623,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9408368588968491,
"min": 0.5104662379304793,
"max": 1.0388012104473696,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01342881472976101,
"min": 0.00021887230763914541,
"max": 0.013465048806111332,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18800340621665412,
"min": 0.0028453399993088905,
"max": 0.18851068328555864,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.26494043552857e-06,
"min": 7.26494043552857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010170916609739998,
"min": 0.00010170916609739998,
"max": 0.0033750328749890994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242161428571431,
"min": 0.10242161428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339026000000004,
"min": 1.3886848,
"max": 2.4250109,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002519192671428572,
"min": 0.0002519192671428572,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035268697400000003,
"min": 0.0035268697400000003,
"max": 0.11251858891000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008400741964578629,
"min": 0.008280488662421703,
"max": 0.48458951711654663,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1176103875041008,
"min": 0.1176103875041008,
"max": 3.3921265602111816,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 445.7096774193548,
"min": 445.7096774193548,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27634.0,
"min": 15984.0,
"max": 33468.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4574806234769282,
"min": -1.0000000521540642,
"max": 1.4574806234769282,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 90.36379865556955,
"min": -31.992801636457443,
"max": 94.52719808369875,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4574806234769282,
"min": -1.0000000521540642,
"max": 1.4574806234769282,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 90.36379865556955,
"min": -31.992801636457443,
"max": 94.52719808369875,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03911367055327226,
"min": 0.03911367055327226,
"max": 10.31178966909647,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.42504757430288,
"min": 2.42504757430288,
"max": 164.98863470554352,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679689008",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679691179"
},
"total": 2171.2356473620002,
"count": 1,
"self": 0.4849853100004111,
"children": {
"run_training.setup": {
"total": 0.10360009500004708,
"count": 1,
"self": 0.10360009500004708
},
"TrainerController.start_learning": {
"total": 2170.647061957,
"count": 1,
"self": 1.2947900569865851,
"children": {
"TrainerController._reset_env": {
"total": 6.0556426639998335,
"count": 1,
"self": 6.0556426639998335
},
"TrainerController.advance": {
"total": 2163.1664063900134,
"count": 63530,
"self": 1.443214681032714,
"children": {
"env_step": {
"total": 1541.5136956600159,
"count": 63530,
"self": 1435.341317924138,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.36083039488744,
"count": 63530,
"self": 4.584346185936965,
"children": {
"TorchPolicy.evaluate": {
"total": 100.77648420895048,
"count": 62576,
"self": 100.77648420895048
}
}
},
"workers": {
"total": 0.8115473409905007,
"count": 63530,
"self": 0.0,
"children": {
"worker_root": {
"total": 2165.8552824880067,
"count": 63530,
"is_parallel": true,
"self": 843.0874047269192,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018930290000298555,
"count": 1,
"is_parallel": true,
"self": 0.000576163000005181,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013168660000246746,
"count": 8,
"is_parallel": true,
"self": 0.0013168660000246746
}
}
},
"UnityEnvironment.step": {
"total": 0.0475026120000166,
"count": 1,
"is_parallel": true,
"self": 0.0005337689999578288,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004597399999966001,
"count": 1,
"is_parallel": true,
"self": 0.0004597399999966001
},
"communicator.exchange": {
"total": 0.04495155900008285,
"count": 1,
"is_parallel": true,
"self": 0.04495155900008285
},
"steps_from_proto": {
"total": 0.0015575439999793161,
"count": 1,
"is_parallel": true,
"self": 0.00034359100004621723,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001213952999933099,
"count": 8,
"is_parallel": true,
"self": 0.001213952999933099
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1322.7678777610874,
"count": 63529,
"is_parallel": true,
"self": 31.007937516942775,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.255851480028014,
"count": 63529,
"is_parallel": true,
"self": 22.255851480028014
},
"communicator.exchange": {
"total": 1178.570662281037,
"count": 63529,
"is_parallel": true,
"self": 1178.570662281037
},
"steps_from_proto": {
"total": 90.93342648307976,
"count": 63529,
"is_parallel": true,
"self": 19.089017589072682,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.84440889400707,
"count": 508232,
"is_parallel": true,
"self": 71.84440889400707
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 620.2094960489649,
"count": 63530,
"self": 2.56144504904546,
"children": {
"process_trajectory": {
"total": 116.63084985191699,
"count": 63530,
"self": 116.38088960091636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2499602510006298,
"count": 2,
"self": 0.2499602510006298
}
}
},
"_update_policy": {
"total": 501.0172011480024,
"count": 447,
"self": 317.81410646197537,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.20309468602704,
"count": 22779,
"self": 183.20309468602704
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3910002962802537e-06,
"count": 1,
"self": 1.3910002962802537e-06
},
"TrainerController._save_models": {
"total": 0.13022145500008264,
"count": 1,
"self": 0.00157710399980715,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1286443510002755,
"count": 1,
"self": 0.1286443510002755
}
}
}
}
}
}
}