{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7448220252990723,
"min": 0.7285001277923584,
"max": 1.517684817314148,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 22380.412109375,
"min": 21759.201171875,
"max": 46040.48828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2869012653827667,
"min": -0.10125422477722168,
"max": 0.2869012653827667,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 73.15982055664062,
"min": -24.301013946533203,
"max": 73.15982055664062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.07460573315620422,
"min": -0.02357194945216179,
"max": 0.17345908284187317,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 19.02446174621582,
"min": -5.987275123596191,
"max": 41.63018035888672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07139145691177341,
"min": 0.06448559831664506,
"max": 0.07314192422783027,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9994803967648277,
"min": 0.49657393146263457,
"max": 1.0739827966671347,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012505578871198289,
"min": 5.7939525677462445e-05,
"max": 0.012906822378821797,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17507810419677605,
"min": 0.0008111533594844742,
"max": 0.18069551330350517,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.359397546900002e-06,
"min": 7.359397546900002e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010303156565660002,
"min": 0.00010303156565660002,
"max": 0.0035077913307363,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245309999999999,
"min": 0.10245309999999999,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343434,
"min": 1.3691136000000002,
"max": 2.5692637,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025506469000000006,
"min": 0.00025506469000000006,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003570905660000001,
"min": 0.003570905660000001,
"max": 0.11694944363000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010814101435244083,
"min": 0.010814101435244083,
"max": 0.32483014464378357,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15139742195606232,
"min": 0.15139742195606232,
"max": 2.273811101913452,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 577.56,
"min": 537.1206896551724,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28878.0,
"min": 15984.0,
"max": 32921.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.022295972406864,
"min": -1.0000000521540642,
"max": 1.255917211673383,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 51.11479862034321,
"min": -32.000001668930054,
"max": 72.84319827705622,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.022295972406864,
"min": -1.0000000521540642,
"max": 1.255917211673383,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 51.11479862034321,
"min": -32.000001668930054,
"max": 72.84319827705622,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06501978911168407,
"min": 0.06294771162182425,
"max": 6.655797706916928,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.250989455584204,
"min": 3.250989455584204,
"max": 106.49276331067085,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740055673",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740057926"
},
"total": 2253.4550553910003,
"count": 1,
"self": 0.5767080430000533,
"children": {
"run_training.setup": {
"total": 0.024090635999982624,
"count": 1,
"self": 0.024090635999982624
},
"TrainerController.start_learning": {
"total": 2252.8542567120003,
"count": 1,
"self": 1.5792767720540724,
"children": {
"TrainerController._reset_env": {
"total": 2.215807681000115,
"count": 1,
"self": 2.215807681000115
},
"TrainerController.advance": {
"total": 2248.9654714349463,
"count": 63329,
"self": 1.5896894849674936,
"children": {
"env_step": {
"total": 1553.667692709959,
"count": 63329,
"self": 1382.184583266971,
"children": {
"SubprocessEnvManager._take_step": {
"total": 170.5783961529412,
"count": 63329,
"self": 5.0504912878982395,
"children": {
"TorchPolicy.evaluate": {
"total": 165.52790486504296,
"count": 62551,
"self": 165.52790486504296
}
}
},
"workers": {
"total": 0.9047132900468569,
"count": 63329,
"self": 0.0,
"children": {
"worker_root": {
"total": 2247.0314005250066,
"count": 63329,
"is_parallel": true,
"self": 987.5733478270295,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002669823000132965,
"count": 1,
"is_parallel": true,
"self": 0.0009627449999243254,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017070780002086394,
"count": 8,
"is_parallel": true,
"self": 0.0017070780002086394
}
}
},
"UnityEnvironment.step": {
"total": 0.049093162999952256,
"count": 1,
"is_parallel": true,
"self": 0.0005527280000023893,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004589650000070833,
"count": 1,
"is_parallel": true,
"self": 0.0004589650000070833
},
"communicator.exchange": {
"total": 0.04629354799999419,
"count": 1,
"is_parallel": true,
"self": 0.04629354799999419
},
"steps_from_proto": {
"total": 0.001787921999948594,
"count": 1,
"is_parallel": true,
"self": 0.0003774730000714044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014104489998771896,
"count": 8,
"is_parallel": true,
"self": 0.0014104489998771896
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1259.458052697977,
"count": 63328,
"is_parallel": true,
"self": 33.37086270199438,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.274842388984553,
"count": 63328,
"is_parallel": true,
"self": 24.274842388984553
},
"communicator.exchange": {
"total": 1100.2576939179953,
"count": 63328,
"is_parallel": true,
"self": 1100.2576939179953
},
"steps_from_proto": {
"total": 101.55465368900286,
"count": 63328,
"is_parallel": true,
"self": 21.02511819775077,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.52953549125209,
"count": 506624,
"is_parallel": true,
"self": 80.52953549125209
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 693.7080892400197,
"count": 63329,
"self": 2.956233343065378,
"children": {
"process_trajectory": {
"total": 131.99612444395257,
"count": 63329,
"self": 131.7814221969527,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2147022469998774,
"count": 2,
"self": 0.2147022469998774
}
}
},
"_update_policy": {
"total": 558.7557314530018,
"count": 445,
"self": 306.18615769601183,
"children": {
"TorchPPOOptimizer.update": {
"total": 252.56957375698994,
"count": 22761,
"self": 252.56957375698994
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.57999873207882e-07,
"count": 1,
"self": 9.57999873207882e-07
},
"TrainerController._save_models": {
"total": 0.09369986599995173,
"count": 1,
"self": 0.0012924860002385685,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09240737999971316,
"count": 1,
"self": 0.09240737999971316
}
}
}
}
}
}
}