{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.46760785579681396,
"min": 0.46760785579681396,
"max": 1.501516580581665,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13960.900390625,
"min": 13960.900390625,
"max": 45550.0078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989970.0,
"min": 29952.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989970.0,
"min": 29952.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6204172968864441,
"min": -0.09470121562480927,
"max": 0.6359407305717468,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 174.95767211914062,
"min": -22.8229923248291,
"max": 179.97122192382812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03943268209695816,
"min": 0.0018938126740977168,
"max": 0.3217858076095581,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 11.120016098022461,
"min": 0.4715593457221985,
"max": 76.26323699951172,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06498471974723158,
"min": 0.06408514233054305,
"max": 0.07437456883538592,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9747707962084736,
"min": 0.5018065789175533,
"max": 1.0884324947221422,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014461192886593897,
"min": 0.00029827680278959694,
"max": 0.01497814462392435,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21691789329890845,
"min": 0.0032810448306855662,
"max": 0.21691789329890845,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.462457512546665e-06,
"min": 7.462457512546665e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011193686268819998,
"min": 0.00011193686268819998,
"max": 0.0035081126306291998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248745333333335,
"min": 0.10248745333333335,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373118000000003,
"min": 1.3886848,
"max": 2.5693708,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025849658799999997,
"min": 0.00025849658799999997,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038774488199999997,
"min": 0.0038774488199999997,
"max": 0.11696014292,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011370858177542686,
"min": 0.011370858177542686,
"max": 0.40791645646095276,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17056287825107574,
"min": 0.16179035604000092,
"max": 2.855415105819702,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 317.3,
"min": 297.2916666666667,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31730.0,
"min": 15984.0,
"max": 32759.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.638984147791225,
"min": -1.0000000521540642,
"max": 1.7072736719721242,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 165.53739892691374,
"min": -30.996601596474648,
"max": 165.53739892691374,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.638984147791225,
"min": -1.0000000521540642,
"max": 1.7072736719721242,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 165.53739892691374,
"min": -30.996601596474648,
"max": 165.53739892691374,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03799675231828875,
"min": 0.03487860877112804,
"max": 8.502329995855689,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.837671984147164,
"min": 3.313467833257164,
"max": 136.03727993369102,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1770755850",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1770759541"
},
"total": 3690.482081139,
"count": 1,
"self": 1.3861758550010563,
"children": {
"run_training.setup": {
"total": 0.046169187000487,
"count": 1,
"self": 0.046169187000487
},
"TrainerController.start_learning": {
"total": 3689.0497360969985,
"count": 1,
"self": 2.5784604100335855,
"children": {
"TrainerController._reset_env": {
"total": 3.3637599849989783,
"count": 1,
"self": 3.3637599849989783
},
"TrainerController.advance": {
"total": 3682.960828145966,
"count": 63797,
"self": 2.5688477633011644,
"children": {
"env_step": {
"total": 2532.026782191855,
"count": 63797,
"self": 2355.4400213304907,
"children": {
"SubprocessEnvManager._take_step": {
"total": 175.0932828336281,
"count": 63797,
"self": 7.972021730498454,
"children": {
"TorchPolicy.evaluate": {
"total": 167.12126110312965,
"count": 62567,
"self": 167.12126110312965
}
}
},
"workers": {
"total": 1.4934780277362734,
"count": 63797,
"self": 0.0,
"children": {
"worker_root": {
"total": 3679.63137843408,
"count": 63797,
"is_parallel": true,
"self": 1521.554666676213,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002424050000627176,
"count": 1,
"is_parallel": true,
"self": 0.0006969930018385639,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001727056998788612,
"count": 8,
"is_parallel": true,
"self": 0.001727056998788612
}
}
},
"UnityEnvironment.step": {
"total": 0.07853432999945653,
"count": 1,
"is_parallel": true,
"self": 0.000667635000354494,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005478949988173554,
"count": 1,
"is_parallel": true,
"self": 0.0005478949988173554
},
"communicator.exchange": {
"total": 0.07509799900071812,
"count": 1,
"is_parallel": true,
"self": 0.07509799900071812
},
"steps_from_proto": {
"total": 0.0022208009995665634,
"count": 1,
"is_parallel": true,
"self": 0.0004616900005203206,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017591109990462428,
"count": 8,
"is_parallel": true,
"self": 0.0017591109990462428
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2158.076711757867,
"count": 63796,
"is_parallel": true,
"self": 49.59074895568483,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 32.53719220999301,
"count": 63796,
"is_parallel": true,
"self": 32.53719220999301
},
"communicator.exchange": {
"total": 1921.847789813275,
"count": 63796,
"is_parallel": true,
"self": 1921.847789813275
},
"steps_from_proto": {
"total": 154.10098077891416,
"count": 63796,
"is_parallel": true,
"self": 31.203878667256504,
"children": {
"_process_rank_one_or_two_observation": {
"total": 122.89710211165766,
"count": 510368,
"is_parallel": true,
"self": 122.89710211165766
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1148.3651981908097,
"count": 63797,
"self": 4.852208379659714,
"children": {
"process_trajectory": {
"total": 171.66109698012588,
"count": 63797,
"self": 171.26155225512593,
"children": {
"RLTrainer._checkpoint": {
"total": 0.39954472499994154,
"count": 2,
"self": 0.39954472499994154
}
}
},
"_update_policy": {
"total": 971.8518928310241,
"count": 447,
"self": 380.20127014905665,
"children": {
"TorchPPOOptimizer.update": {
"total": 591.6506226819674,
"count": 22767,
"self": 591.6506226819674
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2780001270584762e-06,
"count": 1,
"self": 1.2780001270584762e-06
},
"TrainerController._save_models": {
"total": 0.1466862779998337,
"count": 1,
"self": 0.007356153000728227,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13933012499910546,
"count": 1,
"self": 0.13933012499910546
}
}
}
}
}
}
}