{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2178608626127243,
"min": 0.2089102417230606,
"max": 1.3913394212722778,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 6514.9111328125,
"min": 6314.10302734375,
"max": 42207.671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989970.0,
"min": 29952.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989970.0,
"min": 29952.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6457516551017761,
"min": -0.07314863055944443,
"max": 0.6649767160415649,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 182.10195922851562,
"min": -17.628820419311523,
"max": 188.8533935546875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011693883687257767,
"min": -0.0015647447435185313,
"max": 0.27065181732177734,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.297675132751465,
"min": -0.4349990487098694,
"max": 64.50035858154297,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06814360083635188,
"min": 0.06661233626142522,
"max": 0.07452236115145029,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9540104117089262,
"min": 0.509838995385424,
"max": 1.0732528871837226,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016412826435130444,
"min": 0.0012507676959355762,
"max": 0.017720301168516188,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22977957009182623,
"min": 0.013319398905371172,
"max": 0.24808421635922664,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4085332448071455e-06,
"min": 7.4085332448071455e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010371946542730004,
"min": 0.00010371946542730004,
"max": 0.0037579399473533994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246947857142856,
"min": 0.10246947857142856,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345727,
"min": 1.3886848,
"max": 2.6526466,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002567009092857144,
"min": 0.0002567009092857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003593812730000001,
"min": 0.003593812730000001,
"max": 0.12527939534000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012457561679184437,
"min": 0.012172956950962543,
"max": 0.4141850471496582,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17440585792064667,
"min": 0.17440585792064667,
"max": 2.8992953300476074,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 294.97938144329896,
"min": 279.94174757281553,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28613.0,
"min": 15984.0,
"max": 33971.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6637711133231823,
"min": -1.0000000521540642,
"max": 1.6992538330646663,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 161.38579799234867,
"min": -27.81880161911249,
"max": 176.72239863872528,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6637711133231823,
"min": -1.0000000521540642,
"max": 1.6992538330646663,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 161.38579799234867,
"min": -27.81880161911249,
"max": 176.72239863872528,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03800263856768671,
"min": 0.03558508694438318,
"max": 8.074447041377425,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6862559410656104,
"min": 3.6862559410656104,
"max": 129.1911526620388,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1729844514",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1729846944"
},
"total": 2429.845569266,
"count": 1,
"self": 0.47735549899971375,
"children": {
"run_training.setup": {
"total": 0.053862128999980996,
"count": 1,
"self": 0.053862128999980996
},
"TrainerController.start_learning": {
"total": 2429.314351638,
"count": 1,
"self": 1.4114136349717228,
"children": {
"TrainerController._reset_env": {
"total": 6.106251036000003,
"count": 1,
"self": 6.106251036000003
},
"TrainerController.advance": {
"total": 2421.692463269029,
"count": 64158,
"self": 1.428269589010597,
"children": {
"env_step": {
"total": 1669.0537683600114,
"count": 64158,
"self": 1502.1591891380945,
"children": {
"SubprocessEnvManager._take_step": {
"total": 166.06582740194665,
"count": 64158,
"self": 4.73694529690863,
"children": {
"TorchPolicy.evaluate": {
"total": 161.32888210503802,
"count": 62562,
"self": 161.32888210503802
}
}
},
"workers": {
"total": 0.8287518199703072,
"count": 64158,
"self": 0.0,
"children": {
"worker_root": {
"total": 2423.881590644007,
"count": 64158,
"is_parallel": true,
"self": 1042.7871329410248,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002103437000073427,
"count": 1,
"is_parallel": true,
"self": 0.000673021000125118,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001430415999948309,
"count": 8,
"is_parallel": true,
"self": 0.001430415999948309
}
}
},
"UnityEnvironment.step": {
"total": 0.04922925599998962,
"count": 1,
"is_parallel": true,
"self": 0.0006232499998759522,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004381790000707042,
"count": 1,
"is_parallel": true,
"self": 0.0004381790000707042
},
"communicator.exchange": {
"total": 0.04649763800000528,
"count": 1,
"is_parallel": true,
"self": 0.04649763800000528
},
"steps_from_proto": {
"total": 0.0016701890000376807,
"count": 1,
"is_parallel": true,
"self": 0.00037701399969591876,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001293175000341762,
"count": 8,
"is_parallel": true,
"self": 0.001293175000341762
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1381.094457702982,
"count": 64157,
"is_parallel": true,
"self": 33.18816328195339,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.912418710025122,
"count": 64157,
"is_parallel": true,
"self": 22.912418710025122
},
"communicator.exchange": {
"total": 1226.0896836820461,
"count": 64157,
"is_parallel": true,
"self": 1226.0896836820461
},
"steps_from_proto": {
"total": 98.90419202895737,
"count": 64157,
"is_parallel": true,
"self": 20.525566321235146,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.37862570772222,
"count": 513256,
"is_parallel": true,
"self": 78.37862570772222
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 751.2104253200071,
"count": 64158,
"self": 2.8918697650162812,
"children": {
"process_trajectory": {
"total": 140.04193731999248,
"count": 64158,
"self": 139.74784854999234,
"children": {
"RLTrainer._checkpoint": {
"total": 0.294088770000144,
"count": 2,
"self": 0.294088770000144
}
}
},
"_update_policy": {
"total": 608.2766182349983,
"count": 459,
"self": 313.0003535959776,
"children": {
"TorchPPOOptimizer.update": {
"total": 295.2762646390207,
"count": 22794,
"self": 295.2762646390207
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.090000847005285e-07,
"count": 1,
"self": 8.090000847005285e-07
},
"TrainerController._save_models": {
"total": 0.1042228889996295,
"count": 1,
"self": 0.0014271189993451117,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10279577000028439,
"count": 1,
"self": 0.10279577000028439
}
}
}
}
}
}
}