{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.31614795327186584,
"min": 0.3133101761341095,
"max": 1.4786099195480347,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9438.9130859375,
"min": 9294.033203125,
"max": 44855.109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989905.0,
"min": 29928.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989905.0,
"min": 29928.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5344586372375488,
"min": -0.10325628519058228,
"max": 0.5344586372375488,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 148.0450439453125,
"min": -24.884765625,
"max": 148.0450439453125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018666913732886314,
"min": -0.026318447664380074,
"max": 0.2914782166481018,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.1707353591918945,
"min": -7.132299423217773,
"max": 69.08033752441406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06856968893650142,
"min": 0.06551236047914348,
"max": 0.07410694344712065,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.95997564511102,
"min": 0.5744742576799031,
"max": 1.1000188456719295,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015221073412768809,
"min": 0.00042303003547693874,
"max": 0.01620259309233266,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21309502777876332,
"min": 0.005076360425723265,
"max": 0.22683630329265725,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.257847580750001e-06,
"min": 7.257847580750001e-06,
"max": 0.0002952102015966,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010160986613050001,
"min": 0.00010160986613050001,
"max": 0.0033821288726238003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241925,
"min": 0.10241925,
"max": 0.1984034,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338695000000001,
"min": 1.4338695000000001,
"max": 2.5273762,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000251683075,
"min": 0.000251683075,
"max": 0.00984049966,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00352356305,
"min": 0.00352356305,
"max": 0.11276488238,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011838098056614399,
"min": 0.011768247000873089,
"max": 0.37937092781066895,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16573336720466614,
"min": 0.16573336720466614,
"max": 3.0349674224853516,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 349.79787234042556,
"min": 349.79787234042556,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32881.0,
"min": 15960.0,
"max": 32881.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5926515939094688,
"min": -0.9999750526621938,
"max": 1.6096999775140712,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 148.1165982335806,
"min": -31.9992016851902,
"max": 148.1165982335806,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5926515939094688,
"min": -0.9999750526621938,
"max": 1.6096999775140712,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 148.1165982335806,
"min": -31.9992016851902,
"max": 148.1165982335806,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04206799990522565,
"min": 0.04206799990522565,
"max": 7.765598534606397,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9123239911859855,
"min": 3.395062978961505,
"max": 124.24957655370235,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683104108",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683107613"
},
"total": 3504.563691634,
"count": 1,
"self": 0.6437886300000173,
"children": {
"run_training.setup": {
"total": 0.05805388299995684,
"count": 1,
"self": 0.05805388299995684
},
"TrainerController.start_learning": {
"total": 3503.861849121,
"count": 1,
"self": 2.3233595340057036,
"children": {
"TrainerController._reset_env": {
"total": 1.4399941530000433,
"count": 1,
"self": 1.4399941530000433
},
"TrainerController.advance": {
"total": 3499.975233230994,
"count": 63713,
"self": 2.3354660000782133,
"children": {
"env_step": {
"total": 2315.088577250976,
"count": 63713,
"self": 2171.8654195729623,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.8018996558967,
"count": 63713,
"self": 7.1924239678699,
"children": {
"TorchPolicy.evaluate": {
"total": 134.6094756880268,
"count": 62553,
"self": 134.6094756880268
}
}
},
"workers": {
"total": 1.4212580221169446,
"count": 63713,
"self": 0.0,
"children": {
"worker_root": {
"total": 3496.299039247917,
"count": 63713,
"is_parallel": true,
"self": 1499.2656022928513,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002761974999884842,
"count": 1,
"is_parallel": true,
"self": 0.0006400569996003469,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002121918000284495,
"count": 8,
"is_parallel": true,
"self": 0.002121918000284495
}
}
},
"UnityEnvironment.step": {
"total": 0.11853186899998036,
"count": 1,
"is_parallel": true,
"self": 0.0006631400001424481,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004887329998837231,
"count": 1,
"is_parallel": true,
"self": 0.0004887329998837231
},
"communicator.exchange": {
"total": 0.11522406699987187,
"count": 1,
"is_parallel": true,
"self": 0.11522406699987187
},
"steps_from_proto": {
"total": 0.002155929000082324,
"count": 1,
"is_parallel": true,
"self": 0.0003899770001680736,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017659519999142503,
"count": 8,
"is_parallel": true,
"self": 0.0017659519999142503
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1997.0334369550656,
"count": 63712,
"is_parallel": true,
"self": 46.68469840389889,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.003006118963867,
"count": 63712,
"is_parallel": true,
"self": 27.003006118963867
},
"communicator.exchange": {
"total": 1785.961976540006,
"count": 63712,
"is_parallel": true,
"self": 1785.961976540006
},
"steps_from_proto": {
"total": 137.38375589219686,
"count": 63712,
"is_parallel": true,
"self": 29.385367073937005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 107.99838881825985,
"count": 509696,
"is_parallel": true,
"self": 107.99838881825985
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1182.5511899799399,
"count": 63713,
"self": 4.577144052903577,
"children": {
"process_trajectory": {
"total": 150.1377383950453,
"count": 63713,
"self": 149.84522223804515,
"children": {
"RLTrainer._checkpoint": {
"total": 0.29251615700013645,
"count": 2,
"self": 0.29251615700013645
}
}
},
"_update_policy": {
"total": 1027.836307531991,
"count": 453,
"self": 425.7108885999826,
"children": {
"TorchPPOOptimizer.update": {
"total": 602.1254189320084,
"count": 22890,
"self": 602.1254189320084
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3250000847619958e-06,
"count": 1,
"self": 1.3250000847619958e-06
},
"TrainerController._save_models": {
"total": 0.1232608780001101,
"count": 1,
"self": 0.002079774000776524,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12118110399933357,
"count": 1,
"self": 0.12118110399933357
}
}
}
}
}
}
}