{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3287721574306488,
"min": 0.3193697929382324,
"max": 1.457699179649353,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9842.123046875,
"min": 9540.21484375,
"max": 44220.76171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989985.0,
"min": 29952.0,
"max": 989985.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989985.0,
"min": 29952.0,
"max": 989985.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6349695324897766,
"min": -0.1380254328250885,
"max": 0.6354854106903076,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 180.96630859375,
"min": -32.71202850341797,
"max": 181.7488250732422,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.026112502440810204,
"min": -0.024325113743543625,
"max": 0.3580947518348694,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.442063331604004,
"min": -6.275879383087158,
"max": 84.86845397949219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07028699548599045,
"min": 0.06581948579458698,
"max": 0.0736776296849834,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9840179368038662,
"min": 0.498060778671897,
"max": 1.0314868155897676,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018089213982290988,
"min": 0.0005531157756742555,
"max": 0.018776497814022675,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2532489957520738,
"min": 0.00608427353241681,
"max": 0.26287096939631743,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.392761821492855e-06,
"min": 7.392761821492855e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010349866550089997,
"min": 0.00010349866550089997,
"max": 0.0032237872254043,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246422142857145,
"min": 0.10246422142857145,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344991000000002,
"min": 1.3886848,
"max": 2.4430175000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025617572071428565,
"min": 0.00025617572071428565,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003586460089999999,
"min": 0.003586460089999999,
"max": 0.10747211043,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007138479501008987,
"min": 0.007138479501008987,
"max": 0.33906790614128113,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09993871301412582,
"min": 0.09993871301412582,
"max": 2.3734753131866455,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 294.1326530612245,
"min": 294.1326530612245,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28825.0,
"min": 15984.0,
"max": 32824.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6650346736518704,
"min": -1.0000000521540642,
"max": 1.6777353298784508,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 163.1733980178833,
"min": -30.643401712179184,
"max": 175.92899769544601,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6650346736518704,
"min": -1.0000000521540642,
"max": 1.6777353298784508,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 163.1733980178833,
"min": -30.643401712179184,
"max": 175.92899769544601,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0215718733035123,
"min": 0.0215718733035123,
"max": 6.714136488735676,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.1140435837442055,
"min": 2.1140435837442055,
"max": 107.42618381977081,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742800071",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742802547"
},
"total": 2476.428709829,
"count": 1,
"self": 0.676707312000417,
"children": {
"run_training.setup": {
"total": 0.02005789199984065,
"count": 1,
"self": 0.02005789199984065
},
"TrainerController.start_learning": {
"total": 2475.731944625,
"count": 1,
"self": 1.5116897870234425,
"children": {
"TrainerController._reset_env": {
"total": 2.189857311999731,
"count": 1,
"self": 2.189857311999731
},
"TrainerController.advance": {
"total": 2471.9388140059773,
"count": 64107,
"self": 1.6433985848461816,
"children": {
"env_step": {
"total": 1749.5672737680597,
"count": 64107,
"self": 1579.7207315889082,
"children": {
"SubprocessEnvManager._take_step": {
"total": 168.94643320802334,
"count": 64107,
"self": 5.0225451780343064,
"children": {
"TorchPolicy.evaluate": {
"total": 163.92388802998903,
"count": 62554,
"self": 163.92388802998903
}
}
},
"workers": {
"total": 0.9001089711282475,
"count": 64107,
"self": 0.0,
"children": {
"worker_root": {
"total": 2469.9943806990022,
"count": 64107,
"is_parallel": true,
"self": 1016.179398908947,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020119809996685944,
"count": 1,
"is_parallel": true,
"self": 0.0006525089997921896,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013594719998764049,
"count": 8,
"is_parallel": true,
"self": 0.0013594719998764049
}
}
},
"UnityEnvironment.step": {
"total": 0.05249458099979165,
"count": 1,
"is_parallel": true,
"self": 0.0005743879996771284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004631199999494129,
"count": 1,
"is_parallel": true,
"self": 0.0004631199999494129
},
"communicator.exchange": {
"total": 0.04976121400022748,
"count": 1,
"is_parallel": true,
"self": 0.04976121400022748
},
"steps_from_proto": {
"total": 0.0016958589999376272,
"count": 1,
"is_parallel": true,
"self": 0.0003607539993026876,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013351050006349396,
"count": 8,
"is_parallel": true,
"self": 0.0013351050006349396
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1453.8149817900553,
"count": 64106,
"is_parallel": true,
"self": 33.848230649151446,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.72019182904114,
"count": 64106,
"is_parallel": true,
"self": 24.72019182904114
},
"communicator.exchange": {
"total": 1290.9650760229833,
"count": 64106,
"is_parallel": true,
"self": 1290.9650760229833
},
"steps_from_proto": {
"total": 104.28148328887937,
"count": 64106,
"is_parallel": true,
"self": 21.625036735092635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.65644655378674,
"count": 512848,
"is_parallel": true,
"self": 82.65644655378674
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 720.7281416530714,
"count": 64107,
"self": 2.9053027309660138,
"children": {
"process_trajectory": {
"total": 141.60361290210676,
"count": 64107,
"self": 141.37621644210776,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2273964599989995,
"count": 2,
"self": 0.2273964599989995
}
}
},
"_update_policy": {
"total": 576.2192260199986,
"count": 446,
"self": 319.0857988020243,
"children": {
"TorchPPOOptimizer.update": {
"total": 257.13342721797426,
"count": 22839,
"self": 257.13342721797426
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.93999492493458e-07,
"count": 1,
"self": 9.93999492493458e-07
},
"TrainerController._save_models": {
"total": 0.09158252599991101,
"count": 1,
"self": 0.0016028089994506445,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08997971700046037,
"count": 1,
"self": 0.08997971700046037
}
}
}
}
}
}
}