{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5775616765022278,
"min": 0.5775616765022278,
"max": 1.5113048553466797,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17206.716796875,
"min": 17206.716796875,
"max": 45846.9453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989949.0,
"min": 29927.0,
"max": 989949.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989949.0,
"min": 29927.0,
"max": 989949.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4286586046218872,
"min": -0.1749507039785385,
"max": 0.4286586046218872,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 114.02318572998047,
"min": -41.46331787109375,
"max": 114.02318572998047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.9057058095932007,
"min": -0.054469265043735504,
"max": 0.9057058095932007,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 240.91773986816406,
"min": -14.21647834777832,
"max": 240.91773986816406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06915697304816285,
"min": 0.06275276394345411,
"max": 0.0740482274990627,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9681976226742799,
"min": 0.5183375924934389,
"max": 1.0326431291383307,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.04977741228876842,
"min": 9.007309141829469e-05,
"max": 0.07835415217954358,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.6968837720427579,
"min": 0.0012610232798561257,
"max": 1.0969581305136102,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.535883202357144e-06,
"min": 7.535883202357144e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010550236483300001,
"min": 0.00010550236483300001,
"max": 0.0033771178742940996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251192857142856,
"min": 0.10251192857142856,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351669999999999,
"min": 1.3886848,
"max": 2.4858405,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002609416642857144,
"min": 0.0002609416642857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036531833000000014,
"min": 0.0036531833000000014,
"max": 0.11258801941,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005902671255171299,
"min": 0.005859593395143747,
"max": 0.28719326853752136,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08263739943504333,
"min": 0.08203430473804474,
"max": 2.010352849960327,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 452.0153846153846,
"min": 452.0153846153846,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29381.0,
"min": 16854.0,
"max": 32652.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4248738139867783,
"min": -0.9999500517733395,
"max": 1.4248738139867783,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 92.61679790914059,
"min": -31.998401656746864,
"max": 92.61679790914059,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4248738139867783,
"min": -0.9999500517733395,
"max": 1.4248738139867783,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 92.61679790914059,
"min": -31.998401656746864,
"max": 92.61679790914059,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.027363571538948094,
"min": 0.027363571538948094,
"max": 5.184302484287935,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7786321500316262,
"min": 1.7402179265627638,
"max": 88.1331422328949,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677357298",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677359656"
},
"total": 2358.2258431970004,
"count": 1,
"self": 0.47833200800005216,
"children": {
"run_training.setup": {
"total": 0.17447656800004552,
"count": 1,
"self": 0.17447656800004552
},
"TrainerController.start_learning": {
"total": 2357.5730346210003,
"count": 1,
"self": 1.6500106140028947,
"children": {
"TrainerController._reset_env": {
"total": 7.329839622999998,
"count": 1,
"self": 7.329839622999998
},
"TrainerController.advance": {
"total": 2348.5055612379974,
"count": 63480,
"self": 1.7567295518820174,
"children": {
"env_step": {
"total": 1593.678411213048,
"count": 63480,
"self": 1462.0734517150947,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.56525121203072,
"count": 63480,
"self": 5.238861278090326,
"children": {
"TorchPolicy.evaluate": {
"total": 125.32638993394039,
"count": 62556,
"self": 41.781766839927286,
"children": {
"TorchPolicy.sample_actions": {
"total": 83.5446230940131,
"count": 62556,
"self": 83.5446230940131
}
}
}
}
},
"workers": {
"total": 1.039708285922643,
"count": 63480,
"self": 0.0,
"children": {
"worker_root": {
"total": 2351.4595876139333,
"count": 63480,
"is_parallel": true,
"self": 1020.6273704679409,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002801509999926566,
"count": 1,
"is_parallel": true,
"self": 0.0009315949998836004,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018699150000429654,
"count": 8,
"is_parallel": true,
"self": 0.0018699150000429654
}
}
},
"UnityEnvironment.step": {
"total": 0.04795191700009127,
"count": 1,
"is_parallel": true,
"self": 0.000554534999992029,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005036339998696349,
"count": 1,
"is_parallel": true,
"self": 0.0005036339998696349
},
"communicator.exchange": {
"total": 0.045205580000128975,
"count": 1,
"is_parallel": true,
"self": 0.045205580000128975
},
"steps_from_proto": {
"total": 0.0016881680001006316,
"count": 1,
"is_parallel": true,
"self": 0.0004210809997857723,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012670870003148593,
"count": 8,
"is_parallel": true,
"self": 0.0012670870003148593
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1330.8322171459924,
"count": 63479,
"is_parallel": true,
"self": 32.76466078892622,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.546398870078974,
"count": 63479,
"is_parallel": true,
"self": 25.546398870078974
},
"communicator.exchange": {
"total": 1170.4781092179742,
"count": 63479,
"is_parallel": true,
"self": 1170.4781092179742
},
"steps_from_proto": {
"total": 102.04304826901307,
"count": 63479,
"is_parallel": true,
"self": 25.238047127010987,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.80500114200208,
"count": 507832,
"is_parallel": true,
"self": 76.80500114200208
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 753.0704204730673,
"count": 63480,
"self": 2.9856970430234924,
"children": {
"process_trajectory": {
"total": 172.52123982104172,
"count": 63480,
"self": 172.33289268404155,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18834713700016437,
"count": 2,
"self": 0.18834713700016437
}
}
},
"_update_policy": {
"total": 577.5634836090021,
"count": 451,
"self": 222.27255498505883,
"children": {
"TorchPPOOptimizer.update": {
"total": 355.2909286239433,
"count": 22770,
"self": 355.2909286239433
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.679999154992402e-07,
"count": 1,
"self": 8.679999154992402e-07
},
"TrainerController._save_models": {
"total": 0.08762227799979883,
"count": 1,
"self": 0.001473958999667957,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08614831900013087,
"count": 1,
"self": 0.08614831900013087
}
}
}
}
}
}
}