{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3485996127128601,
"min": 0.34519270062446594,
"max": 1.4297598600387573,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10212.57421875,
"min": 10212.57421875,
"max": 43373.1953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989943.0,
"min": 29938.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989943.0,
"min": 29938.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5726115107536316,
"min": -0.09041021764278412,
"max": 0.6260767579078674,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 159.75860595703125,
"min": -21.8792724609375,
"max": 177.17971801757812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016080161556601524,
"min": -0.042846694588661194,
"max": 0.537693977355957,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.486364841461182,
"min": -11.182987213134766,
"max": 127.4334716796875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0683570702361166,
"min": 0.06430421630308654,
"max": 0.07329466772102358,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9569989833056324,
"min": 0.48584524377188304,
"max": 1.0696909686084837,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015108441694066351,
"min": 0.0016291479020512643,
"max": 0.015340338628794555,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21151818371692893,
"min": 0.021178922726666435,
"max": 0.22521087773687518,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.518461779592857e-06,
"min": 7.518461779592857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001052584649143,
"min": 0.0001052584649143,
"max": 0.0036359035880321995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250612142857142,
"min": 0.10250612142857142,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350857,
"min": 1.3886848,
"max": 2.617342500000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026036153071428575,
"min": 0.00026036153071428575,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036450614300000002,
"min": 0.0036450614300000002,
"max": 0.12121558322,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009753139689564705,
"min": 0.009753139689564705,
"max": 0.5411096215248108,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13654395937919617,
"min": 0.13654395937919617,
"max": 3.787767171859741,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 325.59139784946234,
"min": 295.7113402061856,
"max": 992.0588235294117,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30280.0,
"min": 16865.0,
"max": 33033.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.607976072670325,
"min": -0.8753529944840599,
"max": 1.6510138423135965,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 147.9337986856699,
"min": -27.060401760041714,
"max": 166.75239807367325,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.607976072670325,
"min": -0.8753529944840599,
"max": 1.6510138423135965,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 147.9337986856699,
"min": -27.060401760041714,
"max": 166.75239807367325,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.033075381974175914,
"min": 0.03036262451976504,
"max": 10.421461978379417,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0429351416241843,
"min": 2.945174578417209,
"max": 177.1648536324501,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687578294",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687579955"
},
"total": 1661.4448952060002,
"count": 1,
"self": 0.42218239400062885,
"children": {
"run_training.setup": {
"total": 0.036978220000037254,
"count": 1,
"self": 0.036978220000037254
},
"TrainerController.start_learning": {
"total": 1660.9857345919995,
"count": 1,
"self": 1.1225961098407424,
"children": {
"TrainerController._reset_env": {
"total": 3.9174938669998483,
"count": 1,
"self": 3.9174938669998483
},
"TrainerController.advance": {
"total": 1655.8444173211583,
"count": 64105,
"self": 1.1414434509774765,
"children": {
"env_step": {
"total": 1118.49894777006,
"count": 64105,
"self": 1025.4449445362875,
"children": {
"SubprocessEnvManager._take_step": {
"total": 92.37147787782715,
"count": 64105,
"self": 4.0477762618584165,
"children": {
"TorchPolicy.evaluate": {
"total": 88.32370161596873,
"count": 62558,
"self": 88.32370161596873
}
}
},
"workers": {
"total": 0.6825253559454723,
"count": 64105,
"self": 0.0,
"children": {
"worker_root": {
"total": 1658.6735151308567,
"count": 64105,
"is_parallel": true,
"self": 718.9195903288955,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017737970001689973,
"count": 1,
"is_parallel": true,
"self": 0.0005266350003694242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001247161999799573,
"count": 8,
"is_parallel": true,
"self": 0.001247161999799573
}
}
},
"UnityEnvironment.step": {
"total": 0.038793466000242915,
"count": 1,
"is_parallel": true,
"self": 0.00039812300019548275,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00028096599999116734,
"count": 1,
"is_parallel": true,
"self": 0.00028096599999116734
},
"communicator.exchange": {
"total": 0.03697890300009021,
"count": 1,
"is_parallel": true,
"self": 0.03697890300009021
},
"steps_from_proto": {
"total": 0.0011354739999660524,
"count": 1,
"is_parallel": true,
"self": 0.00022795299901190447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009075210009541479,
"count": 8,
"is_parallel": true,
"self": 0.0009075210009541479
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 939.7539248019611,
"count": 64104,
"is_parallel": true,
"self": 22.151959591755258,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.221367578104946,
"count": 64104,
"is_parallel": true,
"self": 15.221367578104946
},
"communicator.exchange": {
"total": 836.6234946870759,
"count": 64104,
"is_parallel": true,
"self": 836.6234946870759
},
"steps_from_proto": {
"total": 65.75710294502505,
"count": 64104,
"is_parallel": true,
"self": 13.158115667948096,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.59898727707696,
"count": 512832,
"is_parallel": true,
"self": 52.59898727707696
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 536.2040261001207,
"count": 64105,
"self": 2.235682288197495,
"children": {
"process_trajectory": {
"total": 92.05998204392517,
"count": 64105,
"self": 91.8591094729245,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2008725710006729,
"count": 2,
"self": 0.2008725710006729
}
}
},
"_update_policy": {
"total": 441.90836176799803,
"count": 460,
"self": 277.6746321429464,
"children": {
"TorchPPOOptimizer.update": {
"total": 164.23372962505164,
"count": 22800,
"self": 164.23372962505164
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2210002751089633e-06,
"count": 1,
"self": 1.2210002751089633e-06
},
"TrainerController._save_models": {
"total": 0.1012260730003618,
"count": 1,
"self": 0.001488472000346519,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09973760100001527,
"count": 1,
"self": 0.09973760100001527
}
}
}
}
}
}
}