{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.20050130784511566,
"min": 0.19169797003269196,
"max": 0.2917623221874237,
"count": 34
},
"Pyramids.Policy.Entropy.sum": {
"value": 6040.70361328125,
"min": 3002.727783203125,
"max": 8827.560546875,
"count": 34
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 241.7642276422764,
"min": 211.975,
"max": 312.6774193548387,
"count": 34
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29737.0,
"min": 8479.0,
"max": 32058.0,
"count": 34
},
"Pyramids.Step.mean": {
"value": 2999957.0,
"min": 2009987.0,
"max": 2999957.0,
"count": 34
},
"Pyramids.Step.sum": {
"value": 2999957.0,
"min": 2009987.0,
"max": 2999957.0,
"count": 34
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7713999152183533,
"min": 0.6358982920646667,
"max": 0.7892741560935974,
"count": 34
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 225.248779296875,
"min": 59.13854217529297,
"max": 233.15284729003906,
"count": 34
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005710362922400236,
"min": 0.005461862310767174,
"max": 0.026293151080608368,
"count": 34
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.6674259901046753,
"min": 1.3461965322494507,
"max": 7.756479740142822,
"count": 34
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7591774031039207,
"min": 1.6012687989140069,
"max": 1.7865384488533704,
"count": 34
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 218.13799798488617,
"min": 69.67499950528145,
"max": 231.0405979603529,
"count": 34
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7591774031039207,
"min": 1.6012687989140069,
"max": 1.7865384488533704,
"count": 34
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 218.13799798488617,
"min": 69.67499950528145,
"max": 231.0405979603529,
"count": 34
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.010398394694998241,
"min": 0.010362799834789863,
"max": 0.01481215390745689,
"count": 34
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.2894009421797819,
"min": 0.44559615731122904,
"max": 1.5044092741591157,
"count": 34
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 34
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 34
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06910448090056888,
"min": 0.06701293528264311,
"max": 0.07417189808620606,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.20731344270170665,
"min": 0.13631293868093053,
"max": 0.22251569425861817,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01524706046572343,
"min": 0.012566591292367472,
"max": 0.016535253958621373,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.04574118139717029,
"min": 0.026997257619223093,
"max": 0.049605761875864116,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.06169964613334e-06,
"min": 1.06169964613334e-06,
"max": 9.793286735573332e-05,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 3.18509893840002e-06,
"min": 3.18509893840002e-06,
"max": 0.0002937986020672,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10035386666666668,
"min": 0.10035386666666668,
"max": 0.13264426666666665,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.30106160000000004,
"min": 0.2210010666666667,
"max": 0.3979328,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 4.535128000000024e-05,
"min": 4.535128000000024e-05,
"max": 0.00327116224,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00013605384000000073,
"min": 0.00013605384000000073,
"max": 0.00981348672,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00416584312915802,
"min": 0.0040358733385801315,
"max": 0.004870005417615175,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.01249752938747406,
"min": 0.00846722163259983,
"max": 0.014610015787184238,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683880154",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683882939"
},
"total": 2784.2696735679992,
"count": 1,
"self": 0.5446636629994828,
"children": {
"run_training.setup": {
"total": 0.03739161499925103,
"count": 1,
"self": 0.03739161499925103
},
"TrainerController.start_learning": {
"total": 2783.6876182900005,
"count": 1,
"self": 1.5056411250634483,
"children": {
"TrainerController._reset_env": {
"total": 3.8262678279998,
"count": 1,
"self": 3.8262678279998
},
"TrainerController.advance": {
"total": 2778.167793399937,
"count": 65503,
"self": 1.4492462506814263,
"children": {
"env_step": {
"total": 1882.3800309799626,
"count": 65503,
"self": 1765.581686727819,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.96026929217987,
"count": 65503,
"self": 4.755355206159948,
"children": {
"TorchPolicy.evaluate": {
"total": 111.20491408601993,
"count": 62569,
"self": 111.20491408601993
}
}
},
"workers": {
"total": 0.8380749599637056,
"count": 65503,
"self": 0.0,
"children": {
"worker_root": {
"total": 2778.0892850379496,
"count": 65503,
"is_parallel": true,
"self": 1126.8145868987158,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001663746000303945,
"count": 1,
"is_parallel": true,
"self": 0.0005226020002737641,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001141144000030181,
"count": 8,
"is_parallel": true,
"self": 0.001141144000030181
}
}
},
"UnityEnvironment.step": {
"total": 0.04883291499936604,
"count": 1,
"is_parallel": true,
"self": 0.0006346179989122902,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046008200024516555,
"count": 1,
"is_parallel": true,
"self": 0.00046008200024516555
},
"communicator.exchange": {
"total": 0.04573440599961032,
"count": 1,
"is_parallel": true,
"self": 0.04573440599961032
},
"steps_from_proto": {
"total": 0.0020038090005982667,
"count": 1,
"is_parallel": true,
"self": 0.00042162400222878205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015821849983694847,
"count": 8,
"is_parallel": true,
"self": 0.0015821849983694847
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1651.2746981392338,
"count": 65502,
"is_parallel": true,
"self": 31.880492302464518,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.42365856122433,
"count": 65502,
"is_parallel": true,
"self": 22.42365856122433
},
"communicator.exchange": {
"total": 1498.6431749411222,
"count": 65502,
"is_parallel": true,
"self": 1498.6431749411222
},
"steps_from_proto": {
"total": 98.32737233442276,
"count": 65502,
"is_parallel": true,
"self": 20.24562789644824,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.08174443797452,
"count": 524016,
"is_parallel": true,
"self": 78.08174443797452
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 894.3385161692931,
"count": 65503,
"self": 2.7951120823527162,
"children": {
"process_trajectory": {
"total": 117.07126433492886,
"count": 65503,
"self": 116.70099271292747,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37027162200138264,
"count": 2,
"self": 0.37027162200138264
}
}
},
"_update_policy": {
"total": 774.4721397520116,
"count": 97,
"self": 469.8033551721992,
"children": {
"TorchPPOOptimizer.update": {
"total": 304.6687845798124,
"count": 31056,
"self": 304.6687845798124
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0660005500540137e-06,
"count": 1,
"self": 1.0660005500540137e-06
},
"TrainerController._save_models": {
"total": 0.18791487099952064,
"count": 1,
"self": 0.004501023000557325,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18341384799896332,
"count": 1,
"self": 0.18341384799896332
}
}
}
}
}
}
}