{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43044039607048035,
"min": 0.4232605993747711,
"max": 1.2874609231948853,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12892.55078125,
"min": 12596.2353515625,
"max": 39056.4140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989902.0,
"min": 29963.0,
"max": 989902.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989902.0,
"min": 29963.0,
"max": 989902.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5394355058670044,
"min": -0.11496417224407196,
"max": 0.5394355058670044,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 148.34475708007812,
"min": -27.82132911682129,
"max": 148.34475708007812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.20778167247772217,
"min": -0.2441752552986145,
"max": 0.41229933500289917,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 57.13996124267578,
"min": -63.973915100097656,
"max": 98.12724304199219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0669255613860774,
"min": 0.06242154989463632,
"max": 0.07571350690067083,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9369578594050837,
"min": 0.6792921543429088,
"max": 1.0661665600685712,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01602240451041051,
"min": 0.0002817554371297112,
"max": 0.024683490512307754,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22431366314574713,
"min": 0.0036628206826862454,
"max": 0.34556886717230856,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.510190353778569e-06,
"min": 7.510190353778569e-06,
"max": 0.0002948771017076334,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010514266495289996,
"min": 0.00010514266495289996,
"max": 0.0036334585888471997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025033642857143,
"min": 0.1025033642857143,
"max": 0.19829236666666666,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350471000000002,
"min": 1.4350471000000002,
"max": 2.6111527999999993,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026008609214285707,
"min": 0.00026008609214285707,
"max": 0.00982940743,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003641205289999999,
"min": 0.003641205289999999,
"max": 0.12113416471999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0133809894323349,
"min": 0.0133809894323349,
"max": 0.5212202668190002,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1873338520526886,
"min": 0.1873338520526886,
"max": 4.690982341766357,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 345.7325581395349,
"min": 345.7325581395349,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29733.0,
"min": 17401.0,
"max": 32655.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5839458611958168,
"min": -0.9998774711162813,
"max": 1.5839458611958168,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 134.63539820164442,
"min": -30.99620160460472,
"max": 136.2625983208418,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5839458611958168,
"min": -0.9998774711162813,
"max": 1.5839458611958168,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 134.63539820164442,
"min": -30.99620160460472,
"max": 136.2625983208418,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.047659866179695684,
"min": 0.047659866179695684,
"max": 9.543598460654417,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.051088625274133,
"min": 4.048533490800764,
"max": 171.78477229177952,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702581933",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702584130"
},
"total": 2196.862197849,
"count": 1,
"self": 0.4936809139999241,
"children": {
"run_training.setup": {
"total": 0.05087635700010651,
"count": 1,
"self": 0.05087635700010651
},
"TrainerController.start_learning": {
"total": 2196.317640578,
"count": 1,
"self": 1.356258463949871,
"children": {
"TrainerController._reset_env": {
"total": 3.6979038590000073,
"count": 1,
"self": 3.6979038590000073
},
"TrainerController.advance": {
"total": 2191.18310169105,
"count": 63806,
"self": 1.4198867619961675,
"children": {
"env_step": {
"total": 1558.8987405940118,
"count": 63806,
"self": 1432.0676027180305,
"children": {
"SubprocessEnvManager._take_step": {
"total": 125.99727272798827,
"count": 63806,
"self": 4.73098126990385,
"children": {
"TorchPolicy.evaluate": {
"total": 121.26629145808442,
"count": 62570,
"self": 121.26629145808442
}
}
},
"workers": {
"total": 0.8338651479930377,
"count": 63806,
"self": 0.0,
"children": {
"worker_root": {
"total": 2191.304855199,
"count": 63806,
"is_parallel": true,
"self": 878.1099026559987,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006951646000061373,
"count": 1,
"is_parallel": true,
"self": 0.005762026999946102,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011896190001152718,
"count": 8,
"is_parallel": true,
"self": 0.0011896190001152718
}
}
},
"UnityEnvironment.step": {
"total": 0.0733953180001663,
"count": 1,
"is_parallel": true,
"self": 0.0006574770002316654,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048028699984570267,
"count": 1,
"is_parallel": true,
"self": 0.00048028699984570267
},
"communicator.exchange": {
"total": 0.07066155699999399,
"count": 1,
"is_parallel": true,
"self": 0.07066155699999399
},
"steps_from_proto": {
"total": 0.0015959970000949397,
"count": 1,
"is_parallel": true,
"self": 0.00033455200036769384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012614449997272459,
"count": 8,
"is_parallel": true,
"self": 0.0012614449997272459
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1313.1949525430014,
"count": 63805,
"is_parallel": true,
"self": 35.54912352103702,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.15549091101593,
"count": 63805,
"is_parallel": true,
"self": 24.15549091101593
},
"communicator.exchange": {
"total": 1153.9066822880127,
"count": 63805,
"is_parallel": true,
"self": 1153.9066822880127
},
"steps_from_proto": {
"total": 99.58365582293573,
"count": 63805,
"is_parallel": true,
"self": 19.59839368403709,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.98526213889863,
"count": 510440,
"is_parallel": true,
"self": 79.98526213889863
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 630.8644743350419,
"count": 63806,
"self": 2.753469361000043,
"children": {
"process_trajectory": {
"total": 127.95773141104382,
"count": 63806,
"self": 127.77507564104371,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18265577000011035,
"count": 2,
"self": 0.18265577000011035
}
}
},
"_update_policy": {
"total": 500.15327356299804,
"count": 458,
"self": 296.15334318598934,
"children": {
"TorchPPOOptimizer.update": {
"total": 203.9999303770087,
"count": 22782,
"self": 203.9999303770087
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.297999915550463e-06,
"count": 1,
"self": 1.297999915550463e-06
},
"TrainerController._save_models": {
"total": 0.08037526600037381,
"count": 1,
"self": 0.0013400689999798487,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07903519700039396,
"count": 1,
"self": 0.07903519700039396
}
}
}
}
}
}
}