{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3490028977394104,
"min": 0.3490028977394104,
"max": 1.5002765655517578,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10313.7333984375,
"min": 10313.7333984375,
"max": 45512.390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989898.0,
"min": 29952.0,
"max": 989898.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989898.0,
"min": 29952.0,
"max": 989898.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3843599855899811,
"min": -0.14145436882972717,
"max": 0.4473913311958313,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 99.93359375,
"min": -33.94904708862305,
"max": 121.24304962158203,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013444162905216217,
"min": -0.010150044225156307,
"max": 0.5924503803253174,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.4954824447631836,
"min": -2.5578112602233887,
"max": 140.41073608398438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06755112477029408,
"min": 0.06432348161026659,
"max": 0.07349624312658641,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9457157467841171,
"min": 0.5144737018861049,
"max": 1.0323032667083145,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015232850942820298,
"min": 0.0008376009070016619,
"max": 0.015232850942820298,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21325991319948417,
"min": 0.00837600907001662,
"max": 0.22411914748555123,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.709733144407142e-06,
"min": 7.709733144407142e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001079362640217,
"min": 0.0001079362640217,
"max": 0.0035081240306254003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256987857142856,
"min": 0.10256987857142856,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359783,
"min": 1.3886848,
"max": 2.5693746,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026673086928571423,
"min": 0.00026673086928571423,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037342321699999994,
"min": 0.0037342321699999994,
"max": 0.11696052254,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015443922020494938,
"min": 0.015443922020494938,
"max": 0.6668349504470825,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21621491014957428,
"min": 0.21621491014957428,
"max": 4.667844772338867,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 455.6307692307692,
"min": 404.9142857142857,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29616.0,
"min": 15984.0,
"max": 33354.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3660605830902404,
"min": -1.0000000521540642,
"max": 1.4483323688028564,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 90.15999848395586,
"min": -30.999201618134975,
"max": 102.8315981850028,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3660605830902404,
"min": -1.0000000521540642,
"max": 1.4483323688028564,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 90.15999848395586,
"min": -30.999201618134975,
"max": 102.8315981850028,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07237672993108969,
"min": 0.07048407845847708,
"max": 13.606595835648477,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.776864175451919,
"min": 4.776864175451919,
"max": 217.70553337037563,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740642738",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740644815"
},
"total": 2077.6411050710003,
"count": 1,
"self": 0.7310277990000031,
"children": {
"run_training.setup": {
"total": 0.02015310199999476,
"count": 1,
"self": 0.02015310199999476
},
"TrainerController.start_learning": {
"total": 2076.88992417,
"count": 1,
"self": 1.1532665509566868,
"children": {
"TrainerController._reset_env": {
"total": 2.418771798999842,
"count": 1,
"self": 2.418771798999842
},
"TrainerController.advance": {
"total": 2073.198000866044,
"count": 63624,
"self": 1.239706204142749,
"children": {
"env_step": {
"total": 1416.6882651508977,
"count": 63624,
"self": 1274.1472422438158,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.8671150080679,
"count": 63624,
"self": 4.428074222119903,
"children": {
"TorchPolicy.evaluate": {
"total": 137.439040785948,
"count": 62552,
"self": 137.439040785948
}
}
},
"workers": {
"total": 0.6739078990140115,
"count": 63624,
"self": 0.0,
"children": {
"worker_root": {
"total": 2072.3051315729426,
"count": 63624,
"is_parallel": true,
"self": 899.1633680768853,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019374380001409008,
"count": 1,
"is_parallel": true,
"self": 0.0006278690000272036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013095690001136973,
"count": 8,
"is_parallel": true,
"self": 0.0013095690001136973
}
}
},
"UnityEnvironment.step": {
"total": 0.06787386599989986,
"count": 1,
"is_parallel": true,
"self": 0.0004997880000701116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046138699985931453,
"count": 1,
"is_parallel": true,
"self": 0.00046138699985931453
},
"communicator.exchange": {
"total": 0.06531603500002348,
"count": 1,
"is_parallel": true,
"self": 0.06531603500002348
},
"steps_from_proto": {
"total": 0.001596655999946961,
"count": 1,
"is_parallel": true,
"self": 0.00033773899986044853,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012589170000865124,
"count": 8,
"is_parallel": true,
"self": 0.0012589170000865124
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1173.1417634960574,
"count": 63623,
"is_parallel": true,
"self": 30.247557377036856,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.34549160599113,
"count": 63623,
"is_parallel": true,
"self": 22.34549160599113
},
"communicator.exchange": {
"total": 1030.6586164010116,
"count": 63623,
"is_parallel": true,
"self": 1030.6586164010116
},
"steps_from_proto": {
"total": 89.89009811201777,
"count": 63623,
"is_parallel": true,
"self": 17.401386551964833,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.48871156005293,
"count": 508984,
"is_parallel": true,
"self": 72.48871156005293
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 655.2700295110037,
"count": 63624,
"self": 2.304609800961316,
"children": {
"process_trajectory": {
"total": 120.32255242204405,
"count": 63624,
"self": 120.07594756104368,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24660486100037815,
"count": 2,
"self": 0.24660486100037815
}
}
},
"_update_policy": {
"total": 532.6428672879983,
"count": 449,
"self": 292.05895458601935,
"children": {
"TorchPPOOptimizer.update": {
"total": 240.583912701979,
"count": 22809,
"self": 240.583912701979
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.231999704032205e-06,
"count": 1,
"self": 1.231999704032205e-06
},
"TrainerController._save_models": {
"total": 0.11988372199994046,
"count": 1,
"self": 0.0020174589999442105,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11786626299999625,
"count": 1,
"self": 0.11786626299999625
}
}
}
}
}
}
}