{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.18968354165554047,
"min": 0.1755959391593933,
"max": 1.488650918006897,
"count": 69
},
"Pyramids.Policy.Entropy.sum": {
"value": 5708.7158203125,
"min": 5279.1162109375,
"max": 45159.71484375,
"count": 69
},
"Pyramids.Step.mean": {
"value": 2069951.0,
"min": 29952.0,
"max": 2069951.0,
"count": 69
},
"Pyramids.Step.sum": {
"value": 2069951.0,
"min": 29952.0,
"max": 2069951.0,
"count": 69
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8148035407066345,
"min": -0.09488106518983841,
"max": 0.8228468894958496,
"count": 69
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 246.88546752929688,
"min": -22.82662582397461,
"max": 250.74118041992188,
"count": 69
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01773555763065815,
"min": -0.026374325156211853,
"max": 0.37354576587677,
"count": 69
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.373874187469482,
"min": -7.64855432510376,
"max": 88.53034973144531,
"count": 69
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06496373595326163,
"min": 0.06472064311771343,
"max": 0.07432566444313633,
"count": 69
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9094923033456629,
"min": 0.5202796511019543,
"max": 1.1054783891852233,
"count": 69
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012761120731565392,
"min": 0.0002604049990297676,
"max": 0.016528935115738753,
"count": 69
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1786556902419155,
"min": 0.002604049990297676,
"max": 0.23261369204071045,
"count": 69
},
"Pyramids.Policy.LearningRate.mean": {
"value": 9.442926852359998e-05,
"min": 9.442926852359998e-05,
"max": 0.00029838354339596195,
"count": 69
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0013220097593303996,
"min": 0.0013220097593303996,
"max": 0.004027772457409233,
"count": 69
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.13147640000000002,
"min": 0.13147640000000002,
"max": 0.19946118095238097,
"count": 69
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.8406696000000002,
"min": 1.3962282666666668,
"max": 2.8425907666666665,
"count": 69
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00315449236,
"min": 0.00315449236,
"max": 0.009946171977142856,
"count": 69
},
"Pyramids.Policy.Beta.sum": {
"value": 0.04416289304,
"min": 0.04416289304,
"max": 0.13427481758999998,
"count": 69
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008442183956503868,
"min": 0.007369266357272863,
"max": 0.3536679148674011,
"count": 69
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11819057166576385,
"min": 0.10316973179578781,
"max": 2.475675344467163,
"count": 69
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 232.97744360902254,
"min": 217.27007299270073,
"max": 999.0,
"count": 69
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30986.0,
"min": 15984.0,
"max": 33244.0,
"count": 69
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.752616652259321,
"min": -1.0000000521540642,
"max": 1.7681284558816548,
"count": 69
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 231.34539809823036,
"min": -31.998401656746864,
"max": 242.2335984557867,
"count": 69
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.752616652259321,
"min": -1.0000000521540642,
"max": 1.7681284558816548,
"count": 69
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 231.34539809823036,
"min": -31.998401656746864,
"max": 242.2335984557867,
"count": 69
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.020492130744638747,
"min": 0.019907757191855908,
"max": 6.795010124333203,
"count": 69
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7049612582923146,
"min": 2.219608664920088,
"max": 108.72016198933125,
"count": 69
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 69
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 69
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704867424",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704872692"
},
"total": 5267.881148627001,
"count": 1,
"self": 0.33894925500135287,
"children": {
"run_training.setup": {
"total": 0.05402938999986873,
"count": 1,
"self": 0.05402938999986873
},
"TrainerController.start_learning": {
"total": 5267.488169982,
"count": 1,
"self": 3.289797792961508,
"children": {
"TrainerController._reset_env": {
"total": 2.676654252999924,
"count": 1,
"self": 2.676654252999924
},
"TrainerController.advance": {
"total": 5261.388628928038,
"count": 134176,
"self": 3.3574123511752987,
"children": {
"env_step": {
"total": 3897.98559204801,
"count": 134176,
"self": 3602.5180529010786,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.4263240859941,
"count": 134176,
"self": 10.634103851944474,
"children": {
"TorchPolicy.evaluate": {
"total": 282.79222023404964,
"count": 129600,
"self": 282.79222023404964
}
}
},
"workers": {
"total": 2.0412150609372475,
"count": 134175,
"self": 0.0,
"children": {
"worker_root": {
"total": 5255.301998365187,
"count": 134175,
"is_parallel": true,
"self": 1930.876381950986,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024066099999799917,
"count": 1,
"is_parallel": true,
"self": 0.0006730119989697414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017335980010102503,
"count": 8,
"is_parallel": true,
"self": 0.0017335980010102503
}
}
},
"UnityEnvironment.step": {
"total": 0.05946125399987068,
"count": 1,
"is_parallel": true,
"self": 0.0005948289999651024,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004888270000265038,
"count": 1,
"is_parallel": true,
"self": 0.0004888270000265038
},
"communicator.exchange": {
"total": 0.056643485999757104,
"count": 1,
"is_parallel": true,
"self": 0.056643485999757104
},
"steps_from_proto": {
"total": 0.0017341120001219679,
"count": 1,
"is_parallel": true,
"self": 0.0003462690001470037,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013878429999749642,
"count": 8,
"is_parallel": true,
"self": 0.0013878429999749642
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3324.4256164142007,
"count": 134174,
"is_parallel": true,
"self": 76.52160039914042,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 55.4744102419927,
"count": 134174,
"is_parallel": true,
"self": 55.4744102419927
},
"communicator.exchange": {
"total": 2965.843675842068,
"count": 134174,
"is_parallel": true,
"self": 2965.843675842068
},
"steps_from_proto": {
"total": 226.58592993099955,
"count": 134174,
"is_parallel": true,
"self": 47.414576585091254,
"children": {
"_process_rank_one_or_two_observation": {
"total": 179.1713533459083,
"count": 1073392,
"is_parallel": true,
"self": 179.1713533459083
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1360.0456245288533,
"count": 134175,
"self": 6.41966488498565,
"children": {
"process_trajectory": {
"total": 286.37858654085676,
"count": 134175,
"self": 285.9974531978569,
"children": {
"RLTrainer._checkpoint": {
"total": 0.381133342999874,
"count": 4,
"self": 0.381133342999874
}
}
},
"_update_policy": {
"total": 1067.247373103011,
"count": 956,
"self": 636.0507327511059,
"children": {
"TorchPPOOptimizer.update": {
"total": 431.196640351905,
"count": 47256,
"self": 431.196640351905
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.401000190526247e-06,
"count": 1,
"self": 1.401000190526247e-06
},
"TrainerController._save_models": {
"total": 0.13308760699965205,
"count": 1,
"self": 0.0020231939997756854,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13106441299987637,
"count": 1,
"self": 0.13106441299987637
}
}
}
}
}
}
}