{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7151013612747192,
"min": 0.7151013612747192,
"max": 1.4768272638320923,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 21441.599609375,
"min": 21441.599609375,
"max": 44801.03125,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89923.0,
"min": 29930.0,
"max": 89923.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89923.0,
"min": 29930.0,
"max": 89923.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08861732482910156,
"min": -0.10084986686706543,
"max": -0.03741351142525673,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -21.268157958984375,
"min": -24.30481719970703,
"max": -8.867002487182617,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.2454124093055725,
"min": 0.2454124093055725,
"max": 0.5892899632453918,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 58.89897918701172,
"min": 58.89897918701172,
"max": 139.66172790527344,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06920775743001878,
"min": 0.06920775743001878,
"max": 0.07416160593694575,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8997008465902441,
"min": 0.5191312415586202,
"max": 0.8997008465902441,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0004564289065915861,
"min": 0.0004564289065915861,
"max": 0.01182457747683376,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.005933575785690619,
"min": 0.005933575785690619,
"max": 0.08277204233783632,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.593076699746153e-05,
"min": 7.593076699746153e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000987099970967,
"min": 0.000987099970967,
"max": 0.0018297754900750002,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12531023076923076,
"min": 0.12531023076923076,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.629033,
"min": 1.2868480000000002,
"max": 1.709925,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0025384920538461535,
"min": 0.0025384920538461535,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0330003967,
"min": 0.0330003967,
"max": 0.06104150750000001,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.1585404872894287,
"min": 0.1585404872894287,
"max": 0.6057981848716736,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 2.0610263347625732,
"min": 2.0610263347625732,
"max": 4.24058723449707,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 989.03125,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30969.0,
"min": 16857.0,
"max": 31649.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9999226326903989,
"min": -0.9999226326903989,
"max": -0.8649188012350351,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -30.997601613402367,
"min": -30.997601613402367,
"max": -14.873000890016556,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9999226326903989,
"min": -0.9999226326903989,
"max": -0.8649188012350351,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -30.997601613402367,
"min": -30.997601613402367,
"max": -14.873000890016556,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.860380296745608,
"min": 1.860380296745608,
"max": 11.506214489831644,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 57.671789199113846,
"min": 57.671789199113846,
"max": 195.60564632713795,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704447674",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704447878"
},
"total": 204.19577062300004,
"count": 1,
"self": 0.47653033599999617,
"children": {
"run_training.setup": {
"total": 0.045323929999995016,
"count": 1,
"self": 0.045323929999995016
},
"TrainerController.start_learning": {
"total": 203.67391635700005,
"count": 1,
"self": 0.12931566799397842,
"children": {
"TrainerController._reset_env": {
"total": 2.03130759700025,
"count": 1,
"self": 2.03130759700025
},
"TrainerController.advance": {
"total": 201.4039722590055,
"count": 6293,
"self": 0.14034775600339344,
"children": {
"env_step": {
"total": 137.6886233070113,
"count": 6293,
"self": 124.1414707509748,
"children": {
"SubprocessEnvManager._take_step": {
"total": 13.464441286008423,
"count": 6293,
"self": 0.4770016550132823,
"children": {
"TorchPolicy.evaluate": {
"total": 12.98743963099514,
"count": 6278,
"self": 12.98743963099514
}
}
},
"workers": {
"total": 0.08271127002808498,
"count": 6293,
"self": 0.0,
"children": {
"worker_root": {
"total": 203.07956620701134,
"count": 6293,
"is_parallel": true,
"self": 91.3353706690018,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001778115999968577,
"count": 1,
"is_parallel": true,
"self": 0.0005827370000588417,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011953789999097353,
"count": 8,
"is_parallel": true,
"self": 0.0011953789999097353
}
}
},
"UnityEnvironment.step": {
"total": 0.05097948399998131,
"count": 1,
"is_parallel": true,
"self": 0.0005680190001839946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004939699997521529,
"count": 1,
"is_parallel": true,
"self": 0.0004939699997521529
},
"communicator.exchange": {
"total": 0.048305165999863675,
"count": 1,
"is_parallel": true,
"self": 0.048305165999863675
},
"steps_from_proto": {
"total": 0.0016123290001814894,
"count": 1,
"is_parallel": true,
"self": 0.00034772300023178104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012646059999497083,
"count": 8,
"is_parallel": true,
"self": 0.0012646059999497083
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 111.74419553800954,
"count": 6292,
"is_parallel": true,
"self": 3.515597676053403,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.7053540349907053,
"count": 6292,
"is_parallel": true,
"self": 2.7053540349907053
},
"communicator.exchange": {
"total": 95.20404506799696,
"count": 6292,
"is_parallel": true,
"self": 95.20404506799696
},
"steps_from_proto": {
"total": 10.319198758968469,
"count": 6292,
"is_parallel": true,
"self": 2.0688690879896967,
"children": {
"_process_rank_one_or_two_observation": {
"total": 8.250329670978772,
"count": 50336,
"is_parallel": true,
"self": 8.250329670978772
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 63.57500119599081,
"count": 6293,
"self": 0.18322160399384302,
"children": {
"process_trajectory": {
"total": 12.284347761996742,
"count": 6293,
"self": 12.284347761996742
},
"_update_policy": {
"total": 51.10743183000022,
"count": 35,
"self": 29.932208054012335,
"children": {
"TorchPPOOptimizer.update": {
"total": 21.175223775987888,
"count": 2319,
"self": 21.175223775987888
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100000190490391e-06,
"count": 1,
"self": 1.100000190490391e-06
},
"TrainerController._save_models": {
"total": 0.10931973300012032,
"count": 1,
"self": 0.001582632999998168,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10773710000012215,
"count": 1,
"self": 0.10773710000012215
}
}
}
}
}
}
}