{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6203775405883789,
"min": 0.5921075344085693,
"max": 1.4617739915847778,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18502.140625,
"min": 17621.12109375,
"max": 44344.375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989929.0,
"min": 29927.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989929.0,
"min": 29927.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3301307260990143,
"min": -0.11011659353971481,
"max": 0.3418947458267212,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 86.49424743652344,
"min": -26.427982330322266,
"max": 89.23452758789062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.4124135673046112,
"min": -0.4124135673046112,
"max": 0.6010321378707886,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -108.05235290527344,
"min": -108.05235290527344,
"max": 142.44461059570312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06803671210460986,
"min": 0.06449794516070616,
"max": 0.07551944963709692,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.952513969464538,
"min": 0.6041555970967754,
"max": 1.0581841811951538,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.03798105868086817,
"min": 5.681434177738248e-05,
"max": 0.03798105868086817,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.5317348215321545,
"min": 0.0007954007848833547,
"max": 0.5317348215321545,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.5887474704500035e-06,
"min": 7.5887474704500035e-06,
"max": 0.00029484281421906253,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010624246458630004,
"min": 0.00010624246458630004,
"max": 0.003260235813254799,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252955000000001,
"min": 0.10252955000000001,
"max": 0.1982809375,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354137000000002,
"min": 1.4354137000000002,
"max": 2.3867452000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002627020450000001,
"min": 0.0002627020450000001,
"max": 0.00982826565625,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036778286300000016,
"min": 0.0036778286300000016,
"max": 0.10869584548,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014594855718314648,
"min": 0.014594855718314648,
"max": 0.4680042266845703,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20432798564434052,
"min": 0.20432798564434052,
"max": 3.7440338134765625,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 538.1090909090909,
"min": 532.3,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29596.0,
"min": 16470.0,
"max": 32491.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2072799697518348,
"min": -0.9999250522814691,
"max": 1.2072799697518348,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 66.40039833635092,
"min": -31.99760167300701,
"max": 68.05659875273705,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2072799697518348,
"min": -0.9999250522814691,
"max": 1.2072799697518348,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 66.40039833635092,
"min": -31.99760167300701,
"max": 68.05659875273705,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0810441904023967,
"min": 0.0810441904023967,
"max": 9.820225104689598,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.4574304721318185,
"min": 4.3974466127110645,
"max": 166.94382677972317,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678058359",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678060616"
},
"total": 2256.701269483,
"count": 1,
"self": 0.9685150730001624,
"children": {
"run_training.setup": {
"total": 0.11976122900000519,
"count": 1,
"self": 0.11976122900000519
},
"TrainerController.start_learning": {
"total": 2255.6129931809996,
"count": 1,
"self": 1.5947874490402683,
"children": {
"TrainerController._reset_env": {
"total": 10.97970914199999,
"count": 1,
"self": 10.97970914199999
},
"TrainerController.advance": {
"total": 2242.950974096959,
"count": 63324,
"self": 1.7236818649334964,
"children": {
"env_step": {
"total": 1482.4858018340062,
"count": 63324,
"self": 1350.9526289549794,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.51011746399388,
"count": 63324,
"self": 5.22535747197071,
"children": {
"TorchPolicy.evaluate": {
"total": 125.28475999202317,
"count": 62562,
"self": 41.99029217405507,
"children": {
"TorchPolicy.sample_actions": {
"total": 83.2944678179681,
"count": 62562,
"self": 83.2944678179681
}
}
}
}
},
"workers": {
"total": 1.0230554150328999,
"count": 63324,
"self": 0.0,
"children": {
"worker_root": {
"total": 2249.971282052981,
"count": 63324,
"is_parallel": true,
"self": 1027.5734089779926,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007678671000007853,
"count": 1,
"is_parallel": true,
"self": 0.003839840000011918,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0038388309999959347,
"count": 8,
"is_parallel": true,
"self": 0.0038388309999959347
}
}
},
"UnityEnvironment.step": {
"total": 0.045869123999978,
"count": 1,
"is_parallel": true,
"self": 0.0005193089999693257,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047818699999879755,
"count": 1,
"is_parallel": true,
"self": 0.00047818699999879755
},
"communicator.exchange": {
"total": 0.043165410000028714,
"count": 1,
"is_parallel": true,
"self": 0.043165410000028714
},
"steps_from_proto": {
"total": 0.0017062179999811633,
"count": 1,
"is_parallel": true,
"self": 0.0004345999999486594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001271618000032504,
"count": 8,
"is_parallel": true,
"self": 0.001271618000032504
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1222.3978730749886,
"count": 63323,
"is_parallel": true,
"self": 33.344560645094134,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.96165070698396,
"count": 63323,
"is_parallel": true,
"self": 23.96165070698396
},
"communicator.exchange": {
"total": 1068.904856616969,
"count": 63323,
"is_parallel": true,
"self": 1068.904856616969
},
"steps_from_proto": {
"total": 96.1868051059414,
"count": 63323,
"is_parallel": true,
"self": 23.781626416976223,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.40517868896518,
"count": 506584,
"is_parallel": true,
"self": 72.40517868896518
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 758.7414903980191,
"count": 63324,
"self": 3.0133254709900257,
"children": {
"process_trajectory": {
"total": 167.25818979302812,
"count": 63324,
"self": 167.0552653000277,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2029244930004097,
"count": 2,
"self": 0.2029244930004097
}
}
},
"_update_policy": {
"total": 588.469975134001,
"count": 443,
"self": 227.74845945697973,
"children": {
"TorchPPOOptimizer.update": {
"total": 360.7215156770212,
"count": 22749,
"self": 360.7215156770212
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.420001904596575e-07,
"count": 1,
"self": 8.420001904596575e-07
},
"TrainerController._save_models": {
"total": 0.08752165100031561,
"count": 1,
"self": 0.0015180310006144282,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08600361999970119,
"count": 1,
"self": 0.08600361999970119
}
}
}
}
}
}
}