{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6166474223136902,
"min": 0.613519012928009,
"max": 1.4871398210525513,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18617.818359375,
"min": 18150.345703125,
"max": 45113.875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989907.0,
"min": 29952.0,
"max": 989907.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989907.0,
"min": 29952.0,
"max": 989907.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5205097198486328,
"min": -0.12081731110811234,
"max": 0.5205097198486328,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 141.57864379882812,
"min": -28.99615478515625,
"max": 141.57864379882812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 3.2143611907958984,
"min": -0.32287856936454773,
"max": 3.2143611907958984,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 874.3062133789062,
"min": -87.17721557617188,
"max": 874.3062133789062,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06902512865019077,
"min": 0.06458118757916766,
"max": 0.07375207201485773,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9663518011026707,
"min": 0.5001571698916472,
"max": 1.0590406453702599,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.9818238058526602,
"min": 0.00016723729639785872,
"max": 0.9818238058526602,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 13.745533281937242,
"min": 0.0020068475567743046,
"max": 13.745533281937242,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.546747484449997e-06,
"min": 7.546747484449997e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010565446478229996,
"min": 0.00010565446478229996,
"max": 0.0031368512543829994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251555,
"min": 0.10251555,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352177,
"min": 1.3691136000000002,
"max": 2.345617,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026130344499999996,
"min": 0.00026130344499999996,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036582482299999993,
"min": 0.0036582482299999993,
"max": 0.10458713830000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007841663435101509,
"min": 0.007841663435101509,
"max": 0.3668633997440338,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10978329181671143,
"min": 0.10978329181671143,
"max": 2.5680437088012695,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 353.75,
"min": 353.75,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29715.0,
"min": 15984.0,
"max": 33734.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6224333125920523,
"min": -1.0000000521540642,
"max": 1.6224333125920523,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 136.2843982577324,
"min": -32.000001668930054,
"max": 136.2843982577324,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6224333125920523,
"min": -1.0000000521540642,
"max": 1.6224333125920523,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 136.2843982577324,
"min": -32.000001668930054,
"max": 136.2843982577324,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02913980148661635,
"min": 0.02913980148661635,
"max": 7.264233510941267,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4477433248757734,
"min": 2.4477433248757734,
"max": 116.22773617506027,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745101839",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1745104061"
},
"total": 2222.122740235,
"count": 1,
"self": 0.47503006700071637,
"children": {
"run_training.setup": {
"total": 0.02610630199978914,
"count": 1,
"self": 0.02610630199978914
},
"TrainerController.start_learning": {
"total": 2221.6216038659995,
"count": 1,
"self": 1.3887228439793944,
"children": {
"TrainerController._reset_env": {
"total": 2.6475491420001163,
"count": 1,
"self": 2.6475491420001163
},
"TrainerController.advance": {
"total": 2217.4823861610203,
"count": 63612,
"self": 1.4558593860092515,
"children": {
"env_step": {
"total": 1533.5663014149854,
"count": 63612,
"self": 1375.5761058469188,
"children": {
"SubprocessEnvManager._take_step": {
"total": 157.1589075040322,
"count": 63612,
"self": 4.736006325005974,
"children": {
"TorchPolicy.evaluate": {
"total": 152.4229011790262,
"count": 62565,
"self": 152.4229011790262
}
}
},
"workers": {
"total": 0.8312880640344247,
"count": 63612,
"self": 0.0,
"children": {
"worker_root": {
"total": 2216.3890465150416,
"count": 63612,
"is_parallel": true,
"self": 954.4338118980811,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003145923999909428,
"count": 1,
"is_parallel": true,
"self": 0.0007895820001522225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023563419997572055,
"count": 8,
"is_parallel": true,
"self": 0.0023563419997572055
}
}
},
"UnityEnvironment.step": {
"total": 0.06700158100011322,
"count": 1,
"is_parallel": true,
"self": 0.0005434619999959978,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004978160000064236,
"count": 1,
"is_parallel": true,
"self": 0.0004978160000064236
},
"communicator.exchange": {
"total": 0.06432661000008011,
"count": 1,
"is_parallel": true,
"self": 0.06432661000008011
},
"steps_from_proto": {
"total": 0.0016336930000306893,
"count": 1,
"is_parallel": true,
"self": 0.00035522699954526615,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012784660004854231,
"count": 8,
"is_parallel": true,
"self": 0.0012784660004854231
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1261.9552346169605,
"count": 63611,
"is_parallel": true,
"self": 31.9510277489519,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.140473352988238,
"count": 63611,
"is_parallel": true,
"self": 23.140473352988238
},
"communicator.exchange": {
"total": 1109.6102065159764,
"count": 63611,
"is_parallel": true,
"self": 1109.6102065159764
},
"steps_from_proto": {
"total": 97.25352699904397,
"count": 63611,
"is_parallel": true,
"self": 19.62874044304226,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.62478655600171,
"count": 508888,
"is_parallel": true,
"self": 77.62478655600171
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 682.4602253600253,
"count": 63612,
"self": 2.561019203041269,
"children": {
"process_trajectory": {
"total": 129.9957219649848,
"count": 63612,
"self": 129.8043738609847,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19134810400009883,
"count": 2,
"self": 0.19134810400009883
}
}
},
"_update_policy": {
"total": 549.9034841919993,
"count": 438,
"self": 301.2776891449862,
"children": {
"TorchPPOOptimizer.update": {
"total": 248.62579504701307,
"count": 22827,
"self": 248.62579504701307
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.819997674436308e-07,
"count": 1,
"self": 8.819997674436308e-07
},
"TrainerController._save_models": {
"total": 0.10294483699999546,
"count": 1,
"self": 0.001309223000134807,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10163561399986065,
"count": 1,
"self": 0.10163561399986065
}
}
}
}
}
}
}