{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.20224155485630035,
"min": 0.18919891119003296,
"max": 1.4760081768035889,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 6196.68115234375,
"min": 5703.2119140625,
"max": 44776.18359375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999986.0,
"min": 29952.0,
"max": 2999986.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999986.0,
"min": 29952.0,
"max": 2999986.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7382817268371582,
"min": -0.09220904856920242,
"max": 0.8281110525131226,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 216.31654357910156,
"min": -22.222381591796875,
"max": 249.2614288330078,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.026427878066897392,
"min": -0.044449660927057266,
"max": 0.287759006023407,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.743368148803711,
"min": -11.468012809753418,
"max": 69.34992218017578,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06934471040585777,
"min": 0.06548006870187952,
"max": 0.07358330100292473,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9708259456820088,
"min": 0.5074961291741173,
"max": 1.101432405722638,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015732101140505016,
"min": 0.00012755344975031127,
"max": 0.015793266620936004,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2202494159670702,
"min": 0.0016581948467540466,
"max": 0.2249048069206765,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4836495054833307e-06,
"min": 1.4836495054833307e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.077109307676663e-05,
"min": 2.077109307676663e-05,
"max": 0.0037575958474681324,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049451666666666,
"min": 0.10049451666666666,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4069232333333332,
"min": 1.3962282666666668,
"max": 2.7525318666666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.940221499999992e-05,
"min": 5.940221499999992e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008316310099999989,
"min": 0.0008316310099999989,
"max": 0.12527793348,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004561177454888821,
"min": 0.004520542919635773,
"max": 0.4806061387062073,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.06385648250579834,
"min": 0.06328760087490082,
"max": 3.3642430305480957,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 231.32824427480915,
"min": 230.888,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30304.0,
"min": 15984.0,
"max": 33813.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7676412083265436,
"min": -1.0000000521540642,
"max": 1.7676412083265436,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 231.5609982907772,
"min": -31.997601687908173,
"max": 231.5609982907772,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7676412083265436,
"min": -1.0000000521540642,
"max": 1.7676412083265436,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 231.5609982907772,
"min": -31.997601687908173,
"max": 231.5609982907772,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.01103419904671085,
"min": 0.01103419904671085,
"max": 9.366937700659037,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.4454800751191215,
"min": 1.3135593771221465,
"max": 149.8710032105446,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675634993",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675641951"
},
"total": 6957.62252999,
"count": 1,
"self": 0.42453726300118433,
"children": {
"run_training.setup": {
"total": 0.12297241799933545,
"count": 1,
"self": 0.12297241799933545
},
"TrainerController.start_learning": {
"total": 6957.075020308999,
"count": 1,
"self": 3.9146723009289417,
"children": {
"TrainerController._reset_env": {
"total": 5.709313559999828,
"count": 1,
"self": 5.709313559999828
},
"TrainerController.advance": {
"total": 6947.36757465907,
"count": 194181,
"self": 4.288722022371076,
"children": {
"env_step": {
"total": 4770.795569283065,
"count": 194181,
"self": 4441.837723978272,
"children": {
"SubprocessEnvManager._take_step": {
"total": 326.483922036894,
"count": 194181,
"self": 13.773610896353603,
"children": {
"TorchPolicy.evaluate": {
"total": 312.7103111405404,
"count": 187576,
"self": 105.27595684465905,
"children": {
"TorchPolicy.sample_actions": {
"total": 207.43435429588135,
"count": 187576,
"self": 207.43435429588135
}
}
}
}
},
"workers": {
"total": 2.473923267898499,
"count": 194181,
"self": 0.0,
"children": {
"worker_root": {
"total": 6943.597529181034,
"count": 194181,
"is_parallel": true,
"self": 2830.8212949768276,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017564170002515311,
"count": 1,
"is_parallel": true,
"self": 0.0006539960004374734,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011024209998140577,
"count": 8,
"is_parallel": true,
"self": 0.0011024209998140577
}
}
},
"UnityEnvironment.step": {
"total": 0.044906576000357745,
"count": 1,
"is_parallel": true,
"self": 0.00045311000121728284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004069540000273264,
"count": 1,
"is_parallel": true,
"self": 0.0004069540000273264
},
"communicator.exchange": {
"total": 0.04244133399970451,
"count": 1,
"is_parallel": true,
"self": 0.04244133399970451
},
"steps_from_proto": {
"total": 0.0016051779994086246,
"count": 1,
"is_parallel": true,
"self": 0.00043878099950234173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011663969999062829,
"count": 8,
"is_parallel": true,
"self": 0.0011663969999062829
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4112.776234204206,
"count": 194180,
"is_parallel": true,
"self": 80.99555811818391,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 65.40549096024279,
"count": 194180,
"is_parallel": true,
"self": 65.40549096024279
},
"communicator.exchange": {
"total": 3669.043468635401,
"count": 194180,
"is_parallel": true,
"self": 3669.043468635401
},
"steps_from_proto": {
"total": 297.3317164903783,
"count": 194180,
"is_parallel": true,
"self": 66.07946148373594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 231.25225500664237,
"count": 1553440,
"is_parallel": true,
"self": 231.25225500664237
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2172.283283353634,
"count": 194181,
"self": 7.478285970982142,
"children": {
"process_trajectory": {
"total": 494.10770031464654,
"count": 194181,
"self": 493.5391422076473,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5685581069992622,
"count": 6,
"self": 0.5685581069992622
}
}
},
"_update_policy": {
"total": 1670.6972970680054,
"count": 1387,
"self": 655.4531367357622,
"children": {
"TorchPPOOptimizer.update": {
"total": 1015.2441603322432,
"count": 68457,
"self": 1015.2441603322432
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0710009519243613e-06,
"count": 1,
"self": 1.0710009519243613e-06
},
"TrainerController._save_models": {
"total": 0.08345871799974702,
"count": 1,
"self": 0.0014152609983284492,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08204345700141857,
"count": 1,
"self": 0.08204345700141857
}
}
}
}
}
}
}