{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3693426251411438,
"min": 0.36812862753868103,
"max": 1.494761347770691,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11056.640625,
"min": 10984.9580078125,
"max": 45345.08203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5459956526756287,
"min": -0.10052170604467392,
"max": 0.5909854769706726,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 146.3268280029297,
"min": -24.225730895996094,
"max": 165.47593688964844,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.7202028036117554,
"min": -0.21068881452083588,
"max": 1.7202028036117554,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 461.01434326171875,
"min": -58.99286651611328,
"max": 461.01434326171875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07005687046028786,
"min": 0.06551805635034623,
"max": 0.07255054541651507,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.98079618644403,
"min": 0.4861065910994637,
"max": 1.0682836636163606,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.3282941421901504,
"min": 0.00028244519224593763,
"max": 0.3282941421901504,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 4.596117990662106,
"min": 0.003671787499197189,
"max": 4.596117990662106,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.639768882014292e-06,
"min": 7.639768882014292e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010695676434820007,
"min": 0.00010695676434820007,
"max": 0.0035085512304829994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254655714285717,
"min": 0.10254655714285717,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356518000000003,
"min": 1.3691136000000002,
"max": 2.5695170000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026440105857142884,
"min": 0.00026440105857142884,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037016148200000034,
"min": 0.0037016148200000034,
"max": 0.1169747483,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01313258707523346,
"min": 0.01313258707523346,
"max": 0.3453860878944397,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18385621905326843,
"min": 0.18385621905326843,
"max": 2.4177026748657227,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 366.3333333333333,
"min": 321.1208791208791,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28574.0,
"min": 15984.0,
"max": 32599.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5054256165256867,
"min": -1.0000000521540642,
"max": 1.6279031977217684,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 117.42319808900356,
"min": -32.000001668930054,
"max": 151.39499738812447,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5054256165256867,
"min": -1.0000000521540642,
"max": 1.6279031977217684,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 117.42319808900356,
"min": -32.000001668930054,
"max": 151.39499738812447,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05020725337985473,
"min": 0.04541333345632365,
"max": 6.338125875219703,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.916165763628669,
"min": 3.916165763628669,
"max": 101.41001400351524,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681761805",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681763992"
},
"total": 2186.711137513,
"count": 1,
"self": 0.5378457830001935,
"children": {
"run_training.setup": {
"total": 0.19830761899993377,
"count": 1,
"self": 0.19830761899993377
},
"TrainerController.start_learning": {
"total": 2185.9749841109997,
"count": 1,
"self": 1.3274526110349143,
"children": {
"TrainerController._reset_env": {
"total": 4.127969364000023,
"count": 1,
"self": 4.127969364000023
},
"TrainerController.advance": {
"total": 2180.4180531169645,
"count": 63856,
"self": 1.4171396329825257,
"children": {
"env_step": {
"total": 1550.106284176955,
"count": 63856,
"self": 1444.7630942700234,
"children": {
"SubprocessEnvManager._take_step": {
"total": 104.52762288296685,
"count": 63856,
"self": 4.772309941992489,
"children": {
"TorchPolicy.evaluate": {
"total": 99.75531294097436,
"count": 62552,
"self": 99.75531294097436
}
}
},
"workers": {
"total": 0.8155670239646042,
"count": 63856,
"self": 0.0,
"children": {
"worker_root": {
"total": 2181.1032431690005,
"count": 63856,
"is_parallel": true,
"self": 846.3810242849736,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00535000900003979,
"count": 1,
"is_parallel": true,
"self": 0.004000617000201601,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013493919998381898,
"count": 8,
"is_parallel": true,
"self": 0.0013493919998381898
}
}
},
"UnityEnvironment.step": {
"total": 0.0527256950000492,
"count": 1,
"is_parallel": true,
"self": 0.0005684270000756442,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000429528000040591,
"count": 1,
"is_parallel": true,
"self": 0.000429528000040591
},
"communicator.exchange": {
"total": 0.050127680999935365,
"count": 1,
"is_parallel": true,
"self": 0.050127680999935365
},
"steps_from_proto": {
"total": 0.0016000589999976,
"count": 1,
"is_parallel": true,
"self": 0.00037218499983282527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012278740001647748,
"count": 8,
"is_parallel": true,
"self": 0.0012278740001647748
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1334.722218884027,
"count": 63855,
"is_parallel": true,
"self": 33.52364890806848,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.40606335596749,
"count": 63855,
"is_parallel": true,
"self": 23.40606335596749
},
"communicator.exchange": {
"total": 1181.9701114420072,
"count": 63855,
"is_parallel": true,
"self": 1181.9701114420072
},
"steps_from_proto": {
"total": 95.82239517798371,
"count": 63855,
"is_parallel": true,
"self": 20.183972001866323,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.63842317611739,
"count": 510840,
"is_parallel": true,
"self": 75.63842317611739
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 628.8946293070269,
"count": 63856,
"self": 2.5223227760202462,
"children": {
"process_trajectory": {
"total": 110.00763283200979,
"count": 63856,
"self": 106.36404853800946,
"children": {
"RLTrainer._checkpoint": {
"total": 3.643584294000334,
"count": 33,
"self": 3.643584294000334
}
}
},
"_update_policy": {
"total": 516.3646736989969,
"count": 445,
"self": 330.62579184699143,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.73888185200542,
"count": 22830,
"self": 185.73888185200542
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1350002750987187e-06,
"count": 1,
"self": 1.1350002750987187e-06
},
"TrainerController._save_models": {
"total": 0.10150788400005695,
"count": 1,
"self": 0.0014567620000889292,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10005112199996802,
"count": 1,
"self": 0.10005112199996802
}
}
}
}
}
}
}