{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39645612239837646,
"min": 0.39645612239837646,
"max": 1.3250138759613037,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11868.310546875,
"min": 11868.310546875,
"max": 40195.62109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989901.0,
"min": 29906.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989901.0,
"min": 29906.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5158868432044983,
"min": -0.11220736801624298,
"max": 0.5190085172653198,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 140.32122802734375,
"min": -27.041975021362305,
"max": 145.3223876953125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03330960497260094,
"min": 0.00502209784463048,
"max": 0.4393036961555481,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.060212135314941,
"min": 1.3660106658935547,
"max": 104.11497497558594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06736215025011916,
"min": 0.06376748954693415,
"max": 0.07307037039632765,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0104322537517874,
"min": 0.7081734680120468,
"max": 1.0129141553576724,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01649904991627528,
"min": 0.000288388587590092,
"max": 0.01797669113633068,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24748574874412918,
"min": 0.0037758595197618545,
"max": 0.2516736759086295,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.528697490466667e-06,
"min": 7.528697490466667e-06,
"max": 0.00029500995166334995,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011293046235700001,
"min": 0.00011293046235700001,
"max": 0.0035069516310161993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250953333333333,
"min": 0.10250953333333333,
"max": 0.19833665000000003,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.537643,
"min": 1.4778597999999998,
"max": 2.5689838000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026070238,
"min": 0.00026070238,
"max": 0.009833831335000001,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039105357,
"min": 0.0039105357,
"max": 0.11692148162000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012149433605372906,
"min": 0.012149433605372906,
"max": 0.5740084648132324,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1822414994239807,
"min": 0.17913666367530823,
"max": 5.740084648132324,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 373.6666666666667,
"min": 339.0731707317073,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30267.0,
"min": 16704.0,
"max": 32958.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5522394745070258,
"min": -0.999962551984936,
"max": 1.628877086452691,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 125.73139743506908,
"min": -31.998801663517952,
"max": 139.74259804934263,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5522394745070258,
"min": -0.999962551984936,
"max": 1.628877086452691,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 125.73139743506908,
"min": -31.998801663517952,
"max": 139.74259804934263,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04736815745630416,
"min": 0.045758283038901344,
"max": 10.398152391115824,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8368207539606374,
"min": 3.739934921817621,
"max": 187.16674304008484,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1736574834",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1736577850"
},
"total": 3016.746591819,
"count": 1,
"self": 0.9681030470001133,
"children": {
"run_training.setup": {
"total": 0.07328374700000495,
"count": 1,
"self": 0.07328374700000495
},
"TrainerController.start_learning": {
"total": 3015.705205025,
"count": 1,
"self": 2.4698498469279,
"children": {
"TrainerController._reset_env": {
"total": 2.5213875869999356,
"count": 1,
"self": 2.5213875869999356
},
"TrainerController.advance": {
"total": 3010.6017421980723,
"count": 63640,
"self": 2.4163804060867733,
"children": {
"env_step": {
"total": 1973.2847697520806,
"count": 63640,
"self": 1811.8189667020306,
"children": {
"SubprocessEnvManager._take_step": {
"total": 160.08494011203902,
"count": 63640,
"self": 6.859010975913861,
"children": {
"TorchPolicy.evaluate": {
"total": 153.22592913612516,
"count": 62552,
"self": 153.22592913612516
}
}
},
"workers": {
"total": 1.3808629380109778,
"count": 63640,
"self": 0.0,
"children": {
"worker_root": {
"total": 3008.7061637980582,
"count": 63640,
"is_parallel": true,
"self": 1371.3164166971483,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025231390000044485,
"count": 1,
"is_parallel": true,
"self": 0.0007746059991404763,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017485330008639721,
"count": 8,
"is_parallel": true,
"self": 0.0017485330008639721
}
}
},
"UnityEnvironment.step": {
"total": 0.06258365800022148,
"count": 1,
"is_parallel": true,
"self": 0.0007942289998936758,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0038324820002344495,
"count": 1,
"is_parallel": true,
"self": 0.0038324820002344495
},
"communicator.exchange": {
"total": 0.055947755000033794,
"count": 1,
"is_parallel": true,
"self": 0.055947755000033794
},
"steps_from_proto": {
"total": 0.0020091920000595564,
"count": 1,
"is_parallel": true,
"self": 0.00043326000013621524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015759319999233412,
"count": 8,
"is_parallel": true,
"self": 0.0015759319999233412
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1637.38974710091,
"count": 63639,
"is_parallel": true,
"self": 48.435109216812634,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 29.401752937023502,
"count": 63639,
"is_parallel": true,
"self": 29.401752937023502
},
"communicator.exchange": {
"total": 1429.235093365045,
"count": 63639,
"is_parallel": true,
"self": 1429.235093365045
},
"steps_from_proto": {
"total": 130.3177915820288,
"count": 63639,
"is_parallel": true,
"self": 28.057284848060135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 102.26050673396867,
"count": 509112,
"is_parallel": true,
"self": 102.26050673396867
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1034.900592039905,
"count": 63640,
"self": 4.258381533766169,
"children": {
"process_trajectory": {
"total": 162.3159283801292,
"count": 63640,
"self": 162.07689881212855,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23902956800066022,
"count": 2,
"self": 0.23902956800066022
}
}
},
"_update_policy": {
"total": 868.3262821260096,
"count": 451,
"self": 362.8835018230284,
"children": {
"TorchPPOOptimizer.update": {
"total": 505.4427803029812,
"count": 22794,
"self": 505.4427803029812
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5949999578879215e-06,
"count": 1,
"self": 1.5949999578879215e-06
},
"TrainerController._save_models": {
"total": 0.11222379799983173,
"count": 1,
"self": 0.006230803000107699,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10599299499972403,
"count": 1,
"self": 0.10599299499972403
}
}
}
}
}
}
}