{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6770375967025757,
"min": 0.6770375967025757,
"max": 1.4741123914718628,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20256.96484375,
"min": 20180.3203125,
"max": 44718.671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989921.0,
"min": 29952.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989921.0,
"min": 29952.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.15256021916866302,
"min": -0.14001105725765228,
"max": 0.19619587063789368,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 38.902854919433594,
"min": -33.60265350341797,
"max": 50.029945373535156,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.1216699481010437,
"min": -0.1216699481010437,
"max": 0.2476741522550583,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -31.025836944580078,
"min": -31.025836944580078,
"max": 59.68947219848633,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.070739624888382,
"min": 0.06336003854402149,
"max": 0.07443970094789706,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9903547484373479,
"min": 0.5109099950935777,
"max": 1.0421558132705588,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014404837656516817,
"min": 6.339412249009199e-05,
"max": 0.014404837656516817,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20166772719123544,
"min": 0.0008241235923711958,
"max": 0.20166772719123544,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.5158689233142885e-06,
"min": 7.5158689233142885e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010522216492640004,
"min": 0.00010522216492640004,
"max": 0.0032561621146126995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250525714285715,
"min": 0.10250525714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350736,
"min": 1.3886848,
"max": 2.3853872999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002602751885714287,
"min": 0.0002602751885714287,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036438526400000017,
"min": 0.0036438526400000017,
"max": 0.10856019127,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010482578538358212,
"min": 0.010482578538358212,
"max": 0.3709614872932434,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1467560976743698,
"min": 0.1467560976743698,
"max": 2.5967304706573486,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 669.4347826086956,
"min": 650.1063829787234,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30794.0,
"min": 15984.0,
"max": 32532.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.678195611614248,
"min": -1.0000000521540642,
"max": 0.9242169852269456,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 31.19699813425541,
"min": -31.99640165269375,
"max": 43.43819830566645,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.678195611614248,
"min": -1.0000000521540642,
"max": 0.9242169852269456,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 31.19699813425541,
"min": -31.99640165269375,
"max": 43.43819830566645,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07359139761462322,
"min": 0.07359139761462322,
"max": 7.453615984879434,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.385204290272668,
"min": 3.20029418845661,
"max": 119.25785575807095,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741312289",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741314489"
},
"total": 2199.68297241,
"count": 1,
"self": 0.5329618240002674,
"children": {
"run_training.setup": {
"total": 0.024265543999945294,
"count": 1,
"self": 0.024265543999945294
},
"TrainerController.start_learning": {
"total": 2199.125745042,
"count": 1,
"self": 1.4025505819554382,
"children": {
"TrainerController._reset_env": {
"total": 3.564981853000063,
"count": 1,
"self": 3.564981853000063
},
"TrainerController.advance": {
"total": 2194.016983172044,
"count": 63264,
"self": 1.4278928570383869,
"children": {
"env_step": {
"total": 1513.7613558190321,
"count": 63264,
"self": 1353.3529466970394,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.56073787802507,
"count": 63264,
"self": 4.8241363980391725,
"children": {
"TorchPolicy.evaluate": {
"total": 154.7366014799859,
"count": 62551,
"self": 154.7366014799859
}
}
},
"workers": {
"total": 0.8476712439676248,
"count": 63264,
"self": 0.0,
"children": {
"worker_root": {
"total": 2193.9286826520265,
"count": 63264,
"is_parallel": true,
"self": 956.9467437520548,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005427457999985563,
"count": 1,
"is_parallel": true,
"self": 0.004047743999990416,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013797139999951469,
"count": 8,
"is_parallel": true,
"self": 0.0013797139999951469
}
}
},
"UnityEnvironment.step": {
"total": 0.0671848649999447,
"count": 1,
"is_parallel": true,
"self": 0.0008956520000538148,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003980659998887859,
"count": 1,
"is_parallel": true,
"self": 0.0003980659998887859
},
"communicator.exchange": {
"total": 0.06415476500001205,
"count": 1,
"is_parallel": true,
"self": 0.06415476500001205
},
"steps_from_proto": {
"total": 0.0017363819999900443,
"count": 1,
"is_parallel": true,
"self": 0.0004153209997639351,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013210610002261092,
"count": 8,
"is_parallel": true,
"self": 0.0013210610002261092
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1236.9819388999717,
"count": 63263,
"is_parallel": true,
"self": 31.862789916864585,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.17675419303896,
"count": 63263,
"is_parallel": true,
"self": 23.17675419303896
},
"communicator.exchange": {
"total": 1083.49195943101,
"count": 63263,
"is_parallel": true,
"self": 1083.49195943101
},
"steps_from_proto": {
"total": 98.45043535905825,
"count": 63263,
"is_parallel": true,
"self": 20.101415536205195,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.34901982285305,
"count": 506104,
"is_parallel": true,
"self": 78.34901982285305
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 678.8277344959736,
"count": 63264,
"self": 2.605309592981939,
"children": {
"process_trajectory": {
"total": 127.4891296369899,
"count": 63264,
"self": 127.12092484999016,
"children": {
"RLTrainer._checkpoint": {
"total": 0.36820478699974046,
"count": 2,
"self": 0.36820478699974046
}
}
},
"_update_policy": {
"total": 548.7332952660017,
"count": 444,
"self": 301.96831781598894,
"children": {
"TorchPPOOptimizer.update": {
"total": 246.7649774500128,
"count": 22815,
"self": 246.7649774500128
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4020001799508464e-06,
"count": 1,
"self": 1.4020001799508464e-06
},
"TrainerController._save_models": {
"total": 0.14122803300006126,
"count": 1,
"self": 0.002361922000090999,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13886611099997026,
"count": 1,
"self": 0.13886611099997026
}
}
}
}
}
}
}