{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3337717652320862,
"min": 0.3337717652320862,
"max": 1.5191583633422852,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10029.173828125,
"min": 10029.173828125,
"max": 46085.1875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989953.0,
"min": 29952.0,
"max": 989953.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989953.0,
"min": 29952.0,
"max": 989953.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6208828687667847,
"min": -0.11849431693553925,
"max": 0.6698364615440369,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 174.46807861328125,
"min": -28.557130813598633,
"max": 187.55421447753906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0009937218856066465,
"min": -0.019251415506005287,
"max": 0.34368908405303955,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.27923583984375,
"min": -5.313390731811523,
"max": 81.45431518554688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06637975936029164,
"min": 0.06516393746382423,
"max": 0.07374133162721548,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.929316631044083,
"min": 0.4989270236180565,
"max": 1.0572909826199368,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015151303585899823,
"min": 0.0010113535707143462,
"max": 0.015542685259355319,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21211825020259753,
"min": 0.010113535707143462,
"max": 0.21759759363097447,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.335868983314283e-06,
"min": 7.335868983314283e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010270216576639996,
"min": 0.00010270216576639996,
"max": 0.0036319081893640007,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244525714285715,
"min": 0.10244525714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342336000000002,
"min": 1.3886848,
"max": 2.6106360000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002542811885714285,
"min": 0.0002542811885714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035599366399999993,
"min": 0.0035599366399999993,
"max": 0.1210825364,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008479451760649681,
"min": 0.008479451760649681,
"max": 0.29954075813293457,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11871232092380524,
"min": 0.11871232092380524,
"max": 2.096785306930542,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 302.71875,
"min": 302.35,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29061.0,
"min": 15984.0,
"max": 32641.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6554399833867424,
"min": -1.0000000521540642,
"max": 1.6976499779522418,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 157.26679842174053,
"min": -30.673001691699028,
"max": 169.7649977952242,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6554399833867424,
"min": -1.0000000521540642,
"max": 1.6976499779522418,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 157.26679842174053,
"min": -30.673001691699028,
"max": 169.7649977952242,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.026382080173291463,
"min": 0.026382080173291463,
"max": 5.897820483893156,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.506297616462689,
"min": 2.506297616462689,
"max": 94.3651277422905,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738686258",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738688452"
},
"total": 2194.305023733,
"count": 1,
"self": 1.046406025000124,
"children": {
"run_training.setup": {
"total": 0.019117225000172766,
"count": 1,
"self": 0.019117225000172766
},
"TrainerController.start_learning": {
"total": 2193.2395004829996,
"count": 1,
"self": 1.4057130960700306,
"children": {
"TrainerController._reset_env": {
"total": 2.147486917000151,
"count": 1,
"self": 2.147486917000151
},
"TrainerController.advance": {
"total": 2189.5626948579297,
"count": 64026,
"self": 1.4098507568364766,
"children": {
"env_step": {
"total": 1511.201577152018,
"count": 64026,
"self": 1359.1689895409907,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.2471676330449,
"count": 64026,
"self": 4.630914103966688,
"children": {
"TorchPolicy.evaluate": {
"total": 146.61625352907822,
"count": 62542,
"self": 146.61625352907822
}
}
},
"workers": {
"total": 0.7854199779824285,
"count": 64026,
"self": 0.0,
"children": {
"worker_root": {
"total": 2188.0335371170563,
"count": 64026,
"is_parallel": true,
"self": 940.2210750621318,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019958039999892208,
"count": 1,
"is_parallel": true,
"self": 0.0006930829997600085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013027210002292122,
"count": 8,
"is_parallel": true,
"self": 0.0013027210002292122
}
}
},
"UnityEnvironment.step": {
"total": 0.054009006999876874,
"count": 1,
"is_parallel": true,
"self": 0.000681002999726843,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005722089999835589,
"count": 1,
"is_parallel": true,
"self": 0.0005722089999835589
},
"communicator.exchange": {
"total": 0.0507717320001575,
"count": 1,
"is_parallel": true,
"self": 0.0507717320001575
},
"steps_from_proto": {
"total": 0.001984063000008973,
"count": 1,
"is_parallel": true,
"self": 0.0004885350006134104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014955279993955628,
"count": 8,
"is_parallel": true,
"self": 0.0014955279993955628
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1247.8124620549245,
"count": 64025,
"is_parallel": true,
"self": 31.011972563876043,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.960942003004902,
"count": 64025,
"is_parallel": true,
"self": 22.960942003004902
},
"communicator.exchange": {
"total": 1098.8607034740296,
"count": 64025,
"is_parallel": true,
"self": 1098.8607034740296
},
"steps_from_proto": {
"total": 94.97884401401393,
"count": 64025,
"is_parallel": true,
"self": 19.133933622868653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.84491039114528,
"count": 512200,
"is_parallel": true,
"self": 75.84491039114528
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 676.951266949075,
"count": 64026,
"self": 2.647327563046929,
"children": {
"process_trajectory": {
"total": 128.76076032103379,
"count": 64026,
"self": 128.49301397903378,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26774634200000946,
"count": 2,
"self": 0.26774634200000946
}
}
},
"_update_policy": {
"total": 545.5431790649943,
"count": 450,
"self": 301.0182592379974,
"children": {
"TorchPPOOptimizer.update": {
"total": 244.5249198269969,
"count": 22776,
"self": 244.5249198269969
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0589997145871166e-06,
"count": 1,
"self": 1.0589997145871166e-06
},
"TrainerController._save_models": {
"total": 0.1236045529999501,
"count": 1,
"self": 0.0019112539998786815,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12169329900007142,
"count": 1,
"self": 0.12169329900007142
}
}
}
}
}
}
}