{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.47710397839546204,
"min": 0.47710397839546204,
"max": 1.4627878665924072,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14229.1494140625,
"min": 14197.02734375,
"max": 44375.1328125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989955.0,
"min": 29952.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989955.0,
"min": 29952.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5538780689239502,
"min": -0.10015533864498138,
"max": 0.596457839012146,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 152.87034606933594,
"min": -24.137435913085938,
"max": 167.60464477539062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011806127615272999,
"min": -0.010625611059367657,
"max": 0.3064531683921814,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.258491277694702,
"min": -2.752033233642578,
"max": 72.62940216064453,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06887278073034915,
"min": 0.06580621966737585,
"max": 0.07198270172423313,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.964218930224888,
"min": 0.47706385200271845,
"max": 1.0466934883152135,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015393837669675812,
"min": 0.0006998234438160941,
"max": 0.015393837669675812,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21551372737546137,
"min": 0.009097704769609223,
"max": 0.21551372737546137,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.376626112585711e-06,
"min": 7.376626112585711e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010327276557619995,
"min": 0.00010327276557619995,
"max": 0.0032562698145767997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245884285714286,
"min": 0.10245884285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344238,
"min": 1.3886848,
"max": 2.4428396000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025563840142857135,
"min": 0.00025563840142857135,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035789376199999987,
"min": 0.0035789376199999987,
"max": 0.10856377768,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010403701104223728,
"min": 0.010403701104223728,
"max": 0.3884736895561218,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14565181732177734,
"min": 0.14565181732177734,
"max": 2.719315767288208,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 301.7142857142857,
"min": 301.7142857142857,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29568.0,
"min": 15984.0,
"max": 33090.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6778734493620542,
"min": -1.0000000521540642,
"max": 1.6778734493620542,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 164.4315980374813,
"min": -30.329601660370827,
"max": 164.4315980374813,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6778734493620542,
"min": -1.0000000521540642,
"max": 1.6778734493620542,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 164.4315980374813,
"min": -30.329601660370827,
"max": 164.4315980374813,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03281591789872975,
"min": 0.03281591789872975,
"max": 7.774173963814974,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2159599540755153,
"min": 3.2159599540755153,
"max": 124.38678342103958,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677615639",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677618139"
},
"total": 2500.7735523240003,
"count": 1,
"self": 1.1312119890003487,
"children": {
"run_training.setup": {
"total": 0.12333247000015035,
"count": 1,
"self": 0.12333247000015035
},
"TrainerController.start_learning": {
"total": 2499.519007865,
"count": 1,
"self": 1.4395212949011693,
"children": {
"TrainerController._reset_env": {
"total": 7.570122277999872,
"count": 1,
"self": 7.570122277999872
},
"TrainerController.advance": {
"total": 2490.3652294560993,
"count": 63800,
"self": 1.4803709151051407,
"children": {
"env_step": {
"total": 1676.9278215150362,
"count": 63800,
"self": 1554.4803553699135,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.52656525507837,
"count": 63800,
"self": 4.939296133009975,
"children": {
"TorchPolicy.evaluate": {
"total": 116.58726912206839,
"count": 62574,
"self": 39.537988902992765,
"children": {
"TorchPolicy.sample_actions": {
"total": 77.04928021907563,
"count": 62574,
"self": 77.04928021907563
}
}
}
}
},
"workers": {
"total": 0.9209008900443223,
"count": 63800,
"self": 0.0,
"children": {
"worker_root": {
"total": 2493.6107333059467,
"count": 63800,
"is_parallel": true,
"self": 1063.1157248370646,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019304730003568693,
"count": 1,
"is_parallel": true,
"self": 0.0007121049998204398,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012183680005364295,
"count": 8,
"is_parallel": true,
"self": 0.0012183680005364295
}
}
},
"UnityEnvironment.step": {
"total": 0.05069885700004306,
"count": 1,
"is_parallel": true,
"self": 0.0005657159999827854,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005481930002133595,
"count": 1,
"is_parallel": true,
"self": 0.0005481930002133595
},
"communicator.exchange": {
"total": 0.04797172799999316,
"count": 1,
"is_parallel": true,
"self": 0.04797172799999316
},
"steps_from_proto": {
"total": 0.0016132199998537544,
"count": 1,
"is_parallel": true,
"self": 0.00042023999958473723,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011929800002690172,
"count": 8,
"is_parallel": true,
"self": 0.0011929800002690172
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1430.4950084688821,
"count": 63799,
"is_parallel": true,
"self": 33.98844147596992,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.04005728393213,
"count": 63799,
"is_parallel": true,
"self": 25.04005728393213
},
"communicator.exchange": {
"total": 1272.019068234968,
"count": 63799,
"is_parallel": true,
"self": 1272.019068234968
},
"steps_from_proto": {
"total": 99.44744147401207,
"count": 63799,
"is_parallel": true,
"self": 23.83754373911188,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.60989773490019,
"count": 510392,
"is_parallel": true,
"self": 75.60989773490019
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 811.957037025958,
"count": 63800,
"self": 2.677151846871311,
"children": {
"process_trajectory": {
"total": 177.14451859408246,
"count": 63800,
"self": 176.89812841208186,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24639018200059581,
"count": 2,
"self": 0.24639018200059581
}
}
},
"_update_policy": {
"total": 632.1353665850042,
"count": 449,
"self": 244.5000073340434,
"children": {
"TorchPPOOptimizer.update": {
"total": 387.63535925096085,
"count": 22851,
"self": 387.63535925096085
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3310000213095918e-06,
"count": 1,
"self": 1.3310000213095918e-06
},
"TrainerController._save_models": {
"total": 0.144133504999445,
"count": 1,
"self": 0.0019653859990285127,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14216811900041648,
"count": 1,
"self": 0.14216811900041648
}
}
}
}
}
}
}