{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.9302440881729126,
"min": 0.9302440881729126,
"max": 1.4939602613449097,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 28338.955078125,
"min": 28338.955078125,
"max": 45320.77734375,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89984.0,
"min": 29952.0,
"max": 89984.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89984.0,
"min": 29952.0,
"max": 89984.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06208610534667969,
"min": -0.06208610534667969,
"max": 0.10522226244211197,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -15.024837493896484,
"min": -15.024837493896484,
"max": 24.93767547607422,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.15057668089866638,
"min": 0.15057668089866638,
"max": 0.36229386925697327,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 36.43955612182617,
"min": 36.43955612182617,
"max": 85.8636474609375,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.070543983310854,
"min": 0.06819148283418262,
"max": 0.07067118524065741,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8465277997302479,
"min": 0.49469829668460186,
"max": 0.8465277997302479,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0015385979939076765,
"min": 0.0015385979939076765,
"max": 0.004066233167125085,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.018463175926892117,
"min": 0.018463175926892117,
"max": 0.0284636321698756,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.696432434525e-05,
"min": 7.696432434525e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000923571892143,
"min": 0.000923571892143,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12565475,
"min": 0.12565475,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.507857,
"min": 1.2868480000000002,
"max": 1.507857,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.002572909525,
"min": 0.002572909525,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0308749143,
"min": 0.0308749143,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.11169835925102234,
"min": 0.11169835925102234,
"max": 0.35565438866615295,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.340380311012268,
"min": 1.340380311012268,
"max": 2.4895806312561035,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 977.7575757575758,
"min": 975.1515151515151,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32266.0,
"min": 15984.0,
"max": 32266.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.7967394439108444,
"min": -1.0000000521540642,
"max": -0.7967394439108444,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -26.292401649057865,
"min": -28.211001701653004,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.7967394439108444,
"min": -1.0000000521540642,
"max": -0.7967394439108444,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -26.292401649057865,
"min": -28.211001701653004,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.2650139693057898,
"min": 1.2650139693057898,
"max": 6.859007843770087,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 41.745460987091064,
"min": 41.745460987091064,
"max": 109.74412550032139,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1750947918",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1750948109"
},
"total": 190.41941414999997,
"count": 1,
"self": 0.4809167369996885,
"children": {
"run_training.setup": {
"total": 0.020570664000160832,
"count": 1,
"self": 0.020570664000160832
},
"TrainerController.start_learning": {
"total": 189.91792674900012,
"count": 1,
"self": 0.11317930498762507,
"children": {
"TrainerController._reset_env": {
"total": 2.241953634000083,
"count": 1,
"self": 2.241953634000083
},
"TrainerController.advance": {
"total": 187.45257845001242,
"count": 6301,
"self": 0.13110822000589906,
"children": {
"env_step": {
"total": 125.03569683400792,
"count": 6301,
"self": 110.99123670899917,
"children": {
"SubprocessEnvManager._take_step": {
"total": 13.975807616017164,
"count": 6301,
"self": 0.43651017601860076,
"children": {
"TorchPolicy.evaluate": {
"total": 13.539297439998563,
"count": 6288,
"self": 13.539297439998563
}
}
},
"workers": {
"total": 0.06865250899159037,
"count": 6301,
"self": 0.0,
"children": {
"worker_root": {
"total": 189.43443242899957,
"count": 6301,
"is_parallel": true,
"self": 88.545257619013,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018820230000073934,
"count": 1,
"is_parallel": true,
"self": 0.0005696889998034749,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013123340002039185,
"count": 8,
"is_parallel": true,
"self": 0.0013123340002039185
}
}
},
"UnityEnvironment.step": {
"total": 0.04708416799985571,
"count": 1,
"is_parallel": true,
"self": 0.0005487609996635001,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004509369998686452,
"count": 1,
"is_parallel": true,
"self": 0.0004509369998686452
},
"communicator.exchange": {
"total": 0.044466590000183714,
"count": 1,
"is_parallel": true,
"self": 0.044466590000183714
},
"steps_from_proto": {
"total": 0.0016178800001398486,
"count": 1,
"is_parallel": true,
"self": 0.0003429609998875094,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012749190002523392,
"count": 8,
"is_parallel": true,
"self": 0.0012749190002523392
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 100.88917480998657,
"count": 6300,
"is_parallel": true,
"self": 3.045252129995106,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.1298709690015585,
"count": 6300,
"is_parallel": true,
"self": 2.1298709690015585
},
"communicator.exchange": {
"total": 86.69355271098902,
"count": 6300,
"is_parallel": true,
"self": 86.69355271098902
},
"steps_from_proto": {
"total": 9.020499000000882,
"count": 6300,
"is_parallel": true,
"self": 1.7321952319757656,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.288303768025116,
"count": 50400,
"is_parallel": true,
"self": 7.288303768025116
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 62.2857733959986,
"count": 6301,
"self": 0.14626521199238596,
"children": {
"process_trajectory": {
"total": 11.175305092006283,
"count": 6301,
"self": 11.175305092006283
},
"_update_policy": {
"total": 50.964203091999934,
"count": 32,
"self": 28.262393453001096,
"children": {
"TorchPPOOptimizer.update": {
"total": 22.70180963899884,
"count": 2268,
"self": 22.70180963899884
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.010000212583691e-07,
"count": 1,
"self": 9.010000212583691e-07
},
"TrainerController._save_models": {
"total": 0.11021445899996252,
"count": 1,
"self": 0.0017247739999675105,
"children": {
"RLTrainer._checkpoint": {
"total": 0.108489684999995,
"count": 1,
"self": 0.108489684999995
}
}
}
}
}
}
}