{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3406534194946289,
"min": 0.3406534194946289,
"max": 1.405088186264038,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10165.09765625,
"min": 10165.09765625,
"max": 42624.75390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989921.0,
"min": 29933.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989921.0,
"min": 29933.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5199180245399475,
"min": -0.08457911759614944,
"max": 0.5931931734085083,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 141.9376220703125,
"min": -20.383567810058594,
"max": 168.46685791015625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00415923772379756,
"min": -0.017415380105376244,
"max": 0.3918575644493103,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.1354719400405884,
"min": -4.440921783447266,
"max": 94.82952880859375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07063008521666775,
"min": 0.06460407745916166,
"max": 0.07291541331742427,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0594512782500163,
"min": 0.5104078932219699,
"max": 1.0594512782500163,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016195279567767226,
"min": 0.0016695348747795098,
"max": 0.017797904802830004,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2429291935165084,
"min": 0.023373488246913137,
"max": 0.2572308838308684,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.602397465899996e-06,
"min": 7.602397465899996e-06,
"max": 0.0002952337301601857,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011403596198849994,
"min": 0.00011403596198849994,
"max": 0.004027886557371199,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025341,
"min": 0.1025341,
"max": 0.19841124285714287,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5380115,
"min": 1.3888787,
"max": 2.8426287999999995,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002631565899999999,
"min": 0.0002631565899999999,
"max": 0.009841283161428571,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003947348849999998,
"min": 0.003947348849999998,
"max": 0.13427861712,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01303520705550909,
"min": 0.012413011863827705,
"max": 0.5985026955604553,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19552810490131378,
"min": 0.17378216981887817,
"max": 4.189518928527832,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 331.7931034482759,
"min": 313.4591836734694,
"max": 984.2352941176471,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28866.0,
"min": 16732.0,
"max": 34270.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.645213767201051,
"min": -0.8675294649951598,
"max": 1.6489355345567067,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 143.13359774649143,
"min": -26.300201825797558,
"max": 157.2775982543826,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.645213767201051,
"min": -0.8675294649951598,
"max": 1.6489355345567067,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 143.13359774649143,
"min": -26.300201825797558,
"max": 157.2775982543826,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.045221601152891595,
"min": 0.04210041879795509,
"max": 11.367852910476572,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9342793003015686,
"min": 3.704836854220048,
"max": 193.25349947810173,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1764699544",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1764701822"
},
"total": 2277.629034937,
"count": 1,
"self": 0.6328944790002424,
"children": {
"run_training.setup": {
"total": 0.024123252999970646,
"count": 1,
"self": 0.024123252999970646
},
"TrainerController.start_learning": {
"total": 2276.972017205,
"count": 1,
"self": 1.4153843849862824,
"children": {
"TrainerController._reset_env": {
"total": 2.5828647670000464,
"count": 1,
"self": 2.5828647670000464
},
"TrainerController.advance": {
"total": 2272.8898244910133,
"count": 64026,
"self": 1.4401307780753996,
"children": {
"env_step": {
"total": 1619.1050649840186,
"count": 64026,
"self": 1467.056956512078,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.18289491996825,
"count": 64026,
"self": 4.6527737450246605,
"children": {
"TorchPolicy.evaluate": {
"total": 146.5301211749436,
"count": 62560,
"self": 146.5301211749436
}
}
},
"workers": {
"total": 0.8652135519723743,
"count": 64026,
"self": 0.0,
"children": {
"worker_root": {
"total": 2270.3460559620357,
"count": 64026,
"is_parallel": true,
"self": 921.7507051360647,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023111739999421843,
"count": 1,
"is_parallel": true,
"self": 0.0007488569997349259,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015623170002072584,
"count": 8,
"is_parallel": true,
"self": 0.0015623170002072584
}
}
},
"UnityEnvironment.step": {
"total": 0.06888305300003594,
"count": 1,
"is_parallel": true,
"self": 0.0006714720000218222,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006350720000227739,
"count": 1,
"is_parallel": true,
"self": 0.0006350720000227739
},
"communicator.exchange": {
"total": 0.06549870399999236,
"count": 1,
"is_parallel": true,
"self": 0.06549870399999236
},
"steps_from_proto": {
"total": 0.002077804999998989,
"count": 1,
"is_parallel": true,
"self": 0.0004070159998263989,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00167078900017259,
"count": 8,
"is_parallel": true,
"self": 0.00167078900017259
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1348.595350825971,
"count": 64025,
"is_parallel": true,
"self": 33.64134573299634,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.102321807917633,
"count": 64025,
"is_parallel": true,
"self": 23.102321807917633
},
"communicator.exchange": {
"total": 1184.3715742820411,
"count": 64025,
"is_parallel": true,
"self": 1184.3715742820411
},
"steps_from_proto": {
"total": 107.48010900301597,
"count": 64025,
"is_parallel": true,
"self": 22.67541291589123,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.80469608712474,
"count": 512200,
"is_parallel": true,
"self": 84.80469608712474
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 652.3446287289196,
"count": 64026,
"self": 2.9049180068948317,
"children": {
"process_trajectory": {
"total": 125.04465482302362,
"count": 64026,
"self": 124.85944841702371,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18520640599990656,
"count": 2,
"self": 0.18520640599990656
}
}
},
"_update_policy": {
"total": 524.3950558990011,
"count": 462,
"self": 290.5515272579962,
"children": {
"TorchPPOOptimizer.update": {
"total": 233.84352864100492,
"count": 22749,
"self": 233.84352864100492
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.310001587437e-07,
"count": 1,
"self": 9.310001587437e-07
},
"TrainerController._save_models": {
"total": 0.0839426310003546,
"count": 1,
"self": 0.0009312890001638152,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08301134200019078,
"count": 1,
"self": 0.08301134200019078
}
}
}
}
}
}
}