{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4627642035484314,
"min": 0.4409801959991455,
"max": 1.341817855834961,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13964.373046875,
"min": 13158.8486328125,
"max": 40705.38671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989897.0,
"min": 29921.0,
"max": 989897.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989897.0,
"min": 29921.0,
"max": 989897.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4127635657787323,
"min": -0.13971538841724396,
"max": 0.48504236340522766,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 109.79510498046875,
"min": -33.671409606933594,
"max": 132.901611328125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.003871229011565447,
"min": -0.04230058565735817,
"max": 0.19351845979690552,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.0297468900680542,
"min": -11.125054359436035,
"max": 46.63794708251953,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07077203101241447,
"min": 0.06371498836202942,
"max": 0.07364191300612653,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9908084341738026,
"min": 0.4977809943515679,
"max": 1.072147824968709,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014744577863751274,
"min": 0.0001795210303102424,
"max": 0.015084131373992064,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20642409009251783,
"min": 0.0023337733940331515,
"max": 0.21995367640806318,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.25765472367143e-06,
"min": 7.25765472367143e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010160716613140001,
"min": 0.00010160716613140001,
"max": 0.003493490535503199,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241918571428572,
"min": 0.10241918571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338686,
"min": 1.3886848,
"max": 2.4644968000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002516766528571429,
"min": 0.0002516766528571429,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035234731400000004,
"min": 0.0035234731400000004,
"max": 0.11646323032,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007160759065300226,
"min": 0.007160759065300226,
"max": 0.38711363077163696,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10025062412023544,
"min": 0.10025062412023544,
"max": 2.7097954750061035,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 415.97058823529414,
"min": 404.6714285714286,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28286.0,
"min": 16848.0,
"max": 33052.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4115304098181103,
"min": -0.9998750519007444,
"max": 1.4490666473689287,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 97.39559827744961,
"min": -31.996001660823822,
"max": 105.2099988758564,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4115304098181103,
"min": -0.9998750519007444,
"max": 1.4490666473689287,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 97.39559827744961,
"min": -31.996001660823822,
"max": 105.2099988758564,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.031783506822345466,
"min": 0.031783506822345466,
"max": 7.247509527732344,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.1930619707418373,
"min": 2.1930619707418373,
"max": 123.20766197144985,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673872494",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673874646"
},
"total": 2151.300768894,
"count": 1,
"self": 0.4776521500002673,
"children": {
"run_training.setup": {
"total": 0.1046271870000055,
"count": 1,
"self": 0.1046271870000055
},
"TrainerController.start_learning": {
"total": 2150.718489557,
"count": 1,
"self": 1.3085009829819683,
"children": {
"TrainerController._reset_env": {
"total": 6.483123084999988,
"count": 1,
"self": 6.483123084999988
},
"TrainerController.advance": {
"total": 2142.8353877780187,
"count": 63662,
"self": 1.3067432980437843,
"children": {
"env_step": {
"total": 1475.8530671268281,
"count": 63662,
"self": 1369.6200052738232,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.39229146209345,
"count": 63662,
"self": 4.343631100118728,
"children": {
"TorchPolicy.evaluate": {
"total": 101.04866036197473,
"count": 62551,
"self": 34.1180743219079,
"children": {
"TorchPolicy.sample_actions": {
"total": 66.93058604006683,
"count": 62551,
"self": 66.93058604006683
}
}
}
}
},
"workers": {
"total": 0.8407703909115298,
"count": 63662,
"self": 0.0,
"children": {
"worker_root": {
"total": 2146.1159338839834,
"count": 63662,
"is_parallel": true,
"self": 877.1768949558455,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016425389999312756,
"count": 1,
"is_parallel": true,
"self": 0.0005754570001954562,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010670819997358194,
"count": 8,
"is_parallel": true,
"self": 0.0010670819997358194
}
}
},
"UnityEnvironment.step": {
"total": 0.04290966799999296,
"count": 1,
"is_parallel": true,
"self": 0.0005392449997998483,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043908299994654953,
"count": 1,
"is_parallel": true,
"self": 0.00043908299994654953
},
"communicator.exchange": {
"total": 0.040206208000199695,
"count": 1,
"is_parallel": true,
"self": 0.040206208000199695
},
"steps_from_proto": {
"total": 0.0017251320000468695,
"count": 1,
"is_parallel": true,
"self": 0.0004243489988766669,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013007830011702026,
"count": 8,
"is_parallel": true,
"self": 0.0013007830011702026
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1268.939038928138,
"count": 63661,
"is_parallel": true,
"self": 28.35806869708904,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.245018420994256,
"count": 63661,
"is_parallel": true,
"self": 22.245018420994256
},
"communicator.exchange": {
"total": 1118.537521210108,
"count": 63661,
"is_parallel": true,
"self": 1118.537521210108
},
"steps_from_proto": {
"total": 99.79843059994664,
"count": 63661,
"is_parallel": true,
"self": 22.031519462943834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.76691113700281,
"count": 509288,
"is_parallel": true,
"self": 77.76691113700281
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 665.6755773531468,
"count": 63662,
"self": 2.5139619791998484,
"children": {
"process_trajectory": {
"total": 144.26169371494416,
"count": 63662,
"self": 144.06968686594428,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19200684899988119,
"count": 2,
"self": 0.19200684899988119
}
}
},
"_update_policy": {
"total": 518.8999216590028,
"count": 451,
"self": 200.98487138003566,
"children": {
"TorchPPOOptimizer.update": {
"total": 317.9150502789671,
"count": 22734,
"self": 317.9150502789671
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.269997462979518e-07,
"count": 1,
"self": 9.269997462979518e-07
},
"TrainerController._save_models": {
"total": 0.09147678399949655,
"count": 1,
"self": 0.0014444429989453056,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09003234100055124,
"count": 1,
"self": 0.09003234100055124
}
}
}
}
}
}
}