{
"best_global_step": 50000,
"best_metric": 63.090843200683594,
"best_model_checkpoint": "tinybert_base_train_kd/checkpoint-50000",
"epoch": 25.0,
"eval_steps": 10000,
"global_step": 59550,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.20990764063811923,
"grad_norm": 1038.087890625,
"learning_rate": 4.9900000000000005e-06,
"loss": 6028.7805,
"step": 500
},
{
"epoch": 0.41981528127623846,
"grad_norm": 1000.9192504882812,
"learning_rate": 9.990000000000001e-06,
"loss": 3688.4922,
"step": 1000
},
{
"epoch": 0.6297229219143576,
"grad_norm": 1443.9014892578125,
"learning_rate": 1.499e-05,
"loss": 2203.5858,
"step": 1500
},
{
"epoch": 0.8396305625524769,
"grad_norm": 2327.7587890625,
"learning_rate": 1.999e-05,
"loss": 1477.7162,
"step": 2000
},
{
"epoch": 1.0495382031905962,
"grad_norm": 1928.0552978515625,
"learning_rate": 2.4990000000000003e-05,
"loss": 1145.1917,
"step": 2500
},
{
"epoch": 1.2594458438287153,
"grad_norm": 1427.1600341796875,
"learning_rate": 2.9990000000000003e-05,
"loss": 941.4575,
"step": 3000
},
{
"epoch": 1.4693534844668346,
"grad_norm": 832.8273315429688,
"learning_rate": 3.499e-05,
"loss": 812.8064,
"step": 3500
},
{
"epoch": 1.6792611251049538,
"grad_norm": 880.94140625,
"learning_rate": 3.999e-05,
"loss": 722.3781,
"step": 4000
},
{
"epoch": 1.8891687657430731,
"grad_norm": 458.8153381347656,
"learning_rate": 4.499e-05,
"loss": 657.1395,
"step": 4500
},
{
"epoch": 2.0990764063811924,
"grad_norm": 192.89340209960938,
"learning_rate": 4.999e-05,
"loss": 608.8174,
"step": 5000
},
{
"epoch": 2.3089840470193117,
"grad_norm": 173.0674285888672,
"learning_rate": 5.499000000000001e-05,
"loss": 575.3464,
"step": 5500
},
{
"epoch": 2.5188916876574305,
"grad_norm": 119.65222930908203,
"learning_rate": 5.999e-05,
"loss": 550.9334,
"step": 6000
},
{
"epoch": 2.72879932829555,
"grad_norm": 163.03219604492188,
"learning_rate": 6.499000000000001e-05,
"loss": 532.3316,
"step": 6500
},
{
"epoch": 2.938706968933669,
"grad_norm": 103.94256591796875,
"learning_rate": 6.999e-05,
"loss": 516.1778,
"step": 7000
},
{
"epoch": 3.1486146095717884,
"grad_norm": 137.8463134765625,
"learning_rate": 7.499e-05,
"loss": 502.3519,
"step": 7500
},
{
"epoch": 3.3585222502099077,
"grad_norm": 93.18783569335938,
"learning_rate": 7.999000000000001e-05,
"loss": 493.4963,
"step": 8000
},
{
"epoch": 3.568429890848027,
"grad_norm": 81.264404296875,
"learning_rate": 8.499e-05,
"loss": 486.5324,
"step": 8500
},
{
"epoch": 3.7783375314861463,
"grad_norm": 96.80731201171875,
"learning_rate": 8.999000000000001e-05,
"loss": 477.9452,
"step": 9000
},
{
"epoch": 3.988245172124265,
"grad_norm": 99.205322265625,
"learning_rate": 9.499e-05,
"loss": 463.8162,
"step": 9500
},
{
"epoch": 4.198152812762385,
"grad_norm": 118.66422271728516,
"learning_rate": 9.999000000000001e-05,
"loss": 447.3158,
"step": 10000
},
{
"epoch": 4.198152812762385,
"eval_accuracy": 0.16583655638721168,
"eval_loss": 428.842041015625,
"eval_runtime": 3.6172,
"eval_samples_per_second": 132.423,
"eval_steps_per_second": 1.382,
"step": 10000
},
{
"epoch": 4.408060453400504,
"grad_norm": 144.82168579101562,
"learning_rate": 9.899293642785066e-05,
"loss": 430.4443,
"step": 10500
},
{
"epoch": 4.617968094038623,
"grad_norm": 166.46974182128906,
"learning_rate": 9.798385469223008e-05,
"loss": 416.3207,
"step": 11000
},
{
"epoch": 4.827875734676742,
"grad_norm": 232.3704071044922,
"learning_rate": 9.69747729566095e-05,
"loss": 403.2591,
"step": 11500
},
{
"epoch": 5.037783375314861,
"grad_norm": 254.5363311767578,
"learning_rate": 9.59656912209889e-05,
"loss": 390.4413,
"step": 12000
},
{
"epoch": 5.247691015952981,
"grad_norm": 270.5792236328125,
"learning_rate": 9.495660948536832e-05,
"loss": 377.3261,
"step": 12500
},
{
"epoch": 5.4575986565911,
"grad_norm": 244.77581787109375,
"learning_rate": 9.394752774974774e-05,
"loss": 365.2654,
"step": 13000
},
{
"epoch": 5.667506297229219,
"grad_norm": 267.1288757324219,
"learning_rate": 9.293844601412714e-05,
"loss": 352.7486,
"step": 13500
},
{
"epoch": 5.877413937867338,
"grad_norm": 315.1538391113281,
"learning_rate": 9.192936427850656e-05,
"loss": 338.1035,
"step": 14000
},
{
"epoch": 6.087321578505458,
"grad_norm": 344.386962890625,
"learning_rate": 9.092028254288598e-05,
"loss": 323.3387,
"step": 14500
},
{
"epoch": 6.297229219143577,
"grad_norm": 345.59320068359375,
"learning_rate": 8.991120080726539e-05,
"loss": 308.7323,
"step": 15000
},
{
"epoch": 6.507136859781696,
"grad_norm": 373.200439453125,
"learning_rate": 8.890211907164481e-05,
"loss": 293.0822,
"step": 15500
},
{
"epoch": 6.717044500419815,
"grad_norm": 387.7672119140625,
"learning_rate": 8.789303733602423e-05,
"loss": 276.3089,
"step": 16000
},
{
"epoch": 6.926952141057934,
"grad_norm": 380.2518615722656,
"learning_rate": 8.688395560040363e-05,
"loss": 257.2038,
"step": 16500
},
{
"epoch": 7.136859781696054,
"grad_norm": 306.2325134277344,
"learning_rate": 8.587487386478305e-05,
"loss": 221.4098,
"step": 17000
},
{
"epoch": 7.346767422334173,
"grad_norm": 244.85455322265625,
"learning_rate": 8.486579212916247e-05,
"loss": 200.0532,
"step": 17500
},
{
"epoch": 7.5566750629722925,
"grad_norm": 243.97576904296875,
"learning_rate": 8.385671039354188e-05,
"loss": 186.9741,
"step": 18000
},
{
"epoch": 7.766582703610411,
"grad_norm": 201.4752655029297,
"learning_rate": 8.28476286579213e-05,
"loss": 176.1845,
"step": 18500
},
{
"epoch": 7.97649034424853,
"grad_norm": 180.40164184570312,
"learning_rate": 8.183854692230071e-05,
"loss": 167.2202,
"step": 19000
},
{
"epoch": 8.18639798488665,
"grad_norm": 169.40943908691406,
"learning_rate": 8.082946518668012e-05,
"loss": 158.6278,
"step": 19500
},
{
"epoch": 8.39630562552477,
"grad_norm": 142.1228790283203,
"learning_rate": 7.982038345105954e-05,
"loss": 151.7919,
"step": 20000
},
{
"epoch": 8.39630562552477,
"eval_accuracy": 0.48157916888801594,
"eval_loss": 135.78590393066406,
"eval_runtime": 3.5122,
"eval_samples_per_second": 136.38,
"eval_steps_per_second": 1.424,
"step": 20000
},
{
"epoch": 8.606213266162888,
"grad_norm": 155.93728637695312,
"learning_rate": 7.881130171543896e-05,
"loss": 145.7355,
"step": 20500
},
{
"epoch": 8.816120906801007,
"grad_norm": 139.52723693847656,
"learning_rate": 7.780221997981836e-05,
"loss": 140.6628,
"step": 21000
},
{
"epoch": 9.026028547439127,
"grad_norm": 146.34320068359375,
"learning_rate": 7.679313824419778e-05,
"loss": 135.8976,
"step": 21500
},
{
"epoch": 9.235936188077247,
"grad_norm": 129.9792938232422,
"learning_rate": 7.57840565085772e-05,
"loss": 131.474,
"step": 22000
},
{
"epoch": 9.445843828715365,
"grad_norm": 122.61577606201172,
"learning_rate": 7.477497477295662e-05,
"loss": 127.8668,
"step": 22500
},
{
"epoch": 9.655751469353484,
"grad_norm": 130.75668334960938,
"learning_rate": 7.376589303733603e-05,
"loss": 124.6147,
"step": 23000
},
{
"epoch": 9.865659109991604,
"grad_norm": 130.1267547607422,
"learning_rate": 7.275681130171544e-05,
"loss": 121.6354,
"step": 23500
},
{
"epoch": 10.075566750629722,
"grad_norm": 138.63009643554688,
"learning_rate": 7.174772956609486e-05,
"loss": 118.5567,
"step": 24000
},
{
"epoch": 10.285474391267842,
"grad_norm": 128.6032257080078,
"learning_rate": 7.073864783047427e-05,
"loss": 115.905,
"step": 24500
},
{
"epoch": 10.495382031905962,
"grad_norm": 117.67427825927734,
"learning_rate": 6.972956609485369e-05,
"loss": 113.5025,
"step": 25000
},
{
"epoch": 10.705289672544081,
"grad_norm": 132.61322021484375,
"learning_rate": 6.872048435923311e-05,
"loss": 111.2961,
"step": 25500
},
{
"epoch": 10.9151973131822,
"grad_norm": 124.02651977539062,
"learning_rate": 6.771140262361251e-05,
"loss": 109.3261,
"step": 26000
},
{
"epoch": 11.125104953820319,
"grad_norm": 116.80554962158203,
"learning_rate": 6.670232088799193e-05,
"loss": 106.9513,
"step": 26500
},
{
"epoch": 11.335012594458439,
"grad_norm": 120.98764038085938,
"learning_rate": 6.569323915237135e-05,
"loss": 105.1095,
"step": 27000
},
{
"epoch": 11.544920235096557,
"grad_norm": 109.36351776123047,
"learning_rate": 6.468415741675076e-05,
"loss": 103.5172,
"step": 27500
},
{
"epoch": 11.754827875734676,
"grad_norm": 115.52029418945312,
"learning_rate": 6.367507568113018e-05,
"loss": 101.8046,
"step": 28000
},
{
"epoch": 11.964735516372796,
"grad_norm": 110.40377044677734,
"learning_rate": 6.26659939455096e-05,
"loss": 100.335,
"step": 28500
},
{
"epoch": 12.174643157010916,
"grad_norm": 106.47163391113281,
"learning_rate": 6.1656912209889e-05,
"loss": 98.6005,
"step": 29000
},
{
"epoch": 12.384550797649034,
"grad_norm": 114.08397674560547,
"learning_rate": 6.064783047426842e-05,
"loss": 97.2174,
"step": 29500
},
{
"epoch": 12.594458438287154,
"grad_norm": 108.48992156982422,
"learning_rate": 5.963874873864783e-05,
"loss": 95.9257,
"step": 30000
},
{
"epoch": 12.594458438287154,
"eval_accuracy": 0.5308223969555362,
"eval_loss": 84.8065414428711,
"eval_runtime": 3.6726,
"eval_samples_per_second": 130.426,
"eval_steps_per_second": 1.361,
"step": 30000
},
{
"epoch": 12.804366078925273,
"grad_norm": 116.85979461669922,
"learning_rate": 5.8629667003027243e-05,
"loss": 94.7173,
"step": 30500
},
{
"epoch": 13.014273719563391,
"grad_norm": 101.37419891357422,
"learning_rate": 5.762058526740667e-05,
"loss": 93.6031,
"step": 31000
},
{
"epoch": 13.224181360201511,
"grad_norm": 101.53105926513672,
"learning_rate": 5.661150353178608e-05,
"loss": 92.2381,
"step": 31500
},
{
"epoch": 13.43408900083963,
"grad_norm": 131.7721405029297,
"learning_rate": 5.5602421796165494e-05,
"loss": 91.2291,
"step": 32000
},
{
"epoch": 13.64399664147775,
"grad_norm": 147.26918029785156,
"learning_rate": 5.4593340060544906e-05,
"loss": 90.265,
"step": 32500
},
{
"epoch": 13.853904282115868,
"grad_norm": 107.17750549316406,
"learning_rate": 5.358425832492432e-05,
"loss": 89.2605,
"step": 33000
},
{
"epoch": 14.063811922753988,
"grad_norm": 135.6392364501953,
"learning_rate": 5.257517658930373e-05,
"loss": 88.1679,
"step": 33500
},
{
"epoch": 14.273719563392108,
"grad_norm": 131.0419158935547,
"learning_rate": 5.1566094853683156e-05,
"loss": 87.1749,
"step": 34000
},
{
"epoch": 14.483627204030226,
"grad_norm": 110.07307434082031,
"learning_rate": 5.055701311806257e-05,
"loss": 86.2937,
"step": 34500
},
{
"epoch": 14.693534844668346,
"grad_norm": 107.72781372070312,
"learning_rate": 4.954793138244198e-05,
"loss": 85.5873,
"step": 35000
},
{
"epoch": 14.903442485306465,
"grad_norm": 91.38671112060547,
"learning_rate": 4.853884964682139e-05,
"loss": 84.7775,
"step": 35500
},
{
"epoch": 15.113350125944585,
"grad_norm": 107.55229187011719,
"learning_rate": 4.752976791120081e-05,
"loss": 83.9663,
"step": 36000
},
{
"epoch": 15.323257766582703,
"grad_norm": 93.78266906738281,
"learning_rate": 4.6520686175580225e-05,
"loss": 83.2307,
"step": 36500
},
{
"epoch": 15.533165407220823,
"grad_norm": 94.07606506347656,
"learning_rate": 4.551160443995964e-05,
"loss": 82.4276,
"step": 37000
},
{
"epoch": 15.743073047858942,
"grad_norm": 89.64346313476562,
"learning_rate": 4.4502522704339056e-05,
"loss": 81.7749,
"step": 37500
},
{
"epoch": 15.95298068849706,
"grad_norm": 90.450927734375,
"learning_rate": 4.349344096871847e-05,
"loss": 81.2509,
"step": 38000
},
{
"epoch": 16.162888329135182,
"grad_norm": 104.07128143310547,
"learning_rate": 4.248435923309788e-05,
"loss": 80.4306,
"step": 38500
},
{
"epoch": 16.3727959697733,
"grad_norm": 97.70896911621094,
"learning_rate": 4.14752774974773e-05,
"loss": 79.7886,
"step": 39000
},
{
"epoch": 16.582703610411418,
"grad_norm": 107.2480697631836,
"learning_rate": 4.046619576185671e-05,
"loss": 79.2786,
"step": 39500
},
{
"epoch": 16.79261125104954,
"grad_norm": 94.23933410644531,
"learning_rate": 3.9457114026236124e-05,
"loss": 78.7736,
"step": 40000
},
{
"epoch": 16.79261125104954,
"eval_accuracy": 0.5467926432656723,
"eval_loss": 70.87545013427734,
"eval_runtime": 2.7701,
"eval_samples_per_second": 172.917,
"eval_steps_per_second": 1.805,
"step": 40000
},
{
"epoch": 17.002518891687657,
"grad_norm": 95.26321411132812,
"learning_rate": 3.844803229061554e-05,
"loss": 78.2565,
"step": 40500
},
{
"epoch": 17.212426532325775,
"grad_norm": 97.84280395507812,
"learning_rate": 3.7438950554994956e-05,
"loss": 77.6301,
"step": 41000
},
{
"epoch": 17.422334172963897,
"grad_norm": 118.63774871826172,
"learning_rate": 3.642986881937437e-05,
"loss": 77.0761,
"step": 41500
},
{
"epoch": 17.632241813602015,
"grad_norm": 120.7638168334961,
"learning_rate": 3.542078708375379e-05,
"loss": 76.71,
"step": 42000
},
{
"epoch": 17.842149454240133,
"grad_norm": 96.10567474365234,
"learning_rate": 3.44117053481332e-05,
"loss": 76.231,
"step": 42500
},
{
"epoch": 18.052057094878254,
"grad_norm": 109.37013244628906,
"learning_rate": 3.340262361251261e-05,
"loss": 75.7584,
"step": 43000
},
{
"epoch": 18.261964735516372,
"grad_norm": 81.04769134521484,
"learning_rate": 3.239354187689203e-05,
"loss": 75.1399,
"step": 43500
},
{
"epoch": 18.471872376154494,
"grad_norm": 107.45811462402344,
"learning_rate": 3.138446014127144e-05,
"loss": 74.9177,
"step": 44000
},
{
"epoch": 18.68178001679261,
"grad_norm": 91.4131088256836,
"learning_rate": 3.0375378405650862e-05,
"loss": 74.4926,
"step": 44500
},
{
"epoch": 18.89168765743073,
"grad_norm": 103.88982391357422,
"learning_rate": 2.9366296670030274e-05,
"loss": 74.1645,
"step": 45000
},
{
"epoch": 19.10159529806885,
"grad_norm": 101.31675720214844,
"learning_rate": 2.8357214934409686e-05,
"loss": 73.6817,
"step": 45500
},
{
"epoch": 19.31150293870697,
"grad_norm": 97.94842529296875,
"learning_rate": 2.7348133198789106e-05,
"loss": 73.2026,
"step": 46000
},
{
"epoch": 19.521410579345087,
"grad_norm": 85.70989990234375,
"learning_rate": 2.6339051463168518e-05,
"loss": 72.9578,
"step": 46500
},
{
"epoch": 19.73131821998321,
"grad_norm": 93.49505615234375,
"learning_rate": 2.532996972754793e-05,
"loss": 72.7161,
"step": 47000
},
{
"epoch": 19.941225860621326,
"grad_norm": 89.21786499023438,
"learning_rate": 2.4320887991927346e-05,
"loss": 72.3488,
"step": 47500
},
{
"epoch": 20.151133501259444,
"grad_norm": 86.77904510498047,
"learning_rate": 2.331180625630676e-05,
"loss": 71.9474,
"step": 48000
},
{
"epoch": 20.361041141897566,
"grad_norm": 104.81689453125,
"learning_rate": 2.2302724520686177e-05,
"loss": 71.6556,
"step": 48500
},
{
"epoch": 20.570948782535684,
"grad_norm": 85.92950439453125,
"learning_rate": 2.129364278506559e-05,
"loss": 71.3194,
"step": 49000
},
{
"epoch": 20.7808564231738,
"grad_norm": 86.256103515625,
"learning_rate": 2.0284561049445005e-05,
"loss": 71.1533,
"step": 49500
},
{
"epoch": 20.990764063811923,
"grad_norm": 92.44305419921875,
"learning_rate": 1.927547931382442e-05,
"loss": 70.9704,
"step": 50000
},
{
"epoch": 20.990764063811923,
"eval_accuracy": 0.5510415790506631,
"eval_loss": 63.090843200683594,
"eval_runtime": 2.8803,
"eval_samples_per_second": 166.303,
"eval_steps_per_second": 1.736,
"step": 50000
},
{
"epoch": 21.20067170445004,
"grad_norm": 80.83171844482422,
"learning_rate": 1.8266397578203836e-05,
"loss": 70.5263,
"step": 50500
},
{
"epoch": 21.410579345088163,
"grad_norm": 86.68183898925781,
"learning_rate": 1.725731584258325e-05,
"loss": 70.3711,
"step": 51000
},
{
"epoch": 21.62048698572628,
"grad_norm": 75.10340118408203,
"learning_rate": 1.6248234106962664e-05,
"loss": 70.0563,
"step": 51500
},
{
"epoch": 21.8303946263644,
"grad_norm": 80.75882720947266,
"learning_rate": 1.523915237134208e-05,
"loss": 69.9151,
"step": 52000
},
{
"epoch": 22.04030226700252,
"grad_norm": 80.31729888916016,
"learning_rate": 1.4230070635721492e-05,
"loss": 69.7056,
"step": 52500
},
{
"epoch": 22.250209907640638,
"grad_norm": 79.79283905029297,
"learning_rate": 1.3220988900100908e-05,
"loss": 69.3878,
"step": 53000
},
{
"epoch": 22.460117548278756,
"grad_norm": 88.36482238769531,
"learning_rate": 1.2211907164480324e-05,
"loss": 69.2763,
"step": 53500
},
{
"epoch": 22.670025188916878,
"grad_norm": 82.19640350341797,
"learning_rate": 1.1202825428859738e-05,
"loss": 69.1046,
"step": 54000
},
{
"epoch": 22.879932829554996,
"grad_norm": 77.26073455810547,
"learning_rate": 1.0193743693239152e-05,
"loss": 68.8851,
"step": 54500
},
{
"epoch": 23.089840470193113,
"grad_norm": 89.86582946777344,
"learning_rate": 9.184661957618567e-06,
"loss": 68.7541,
"step": 55000
},
{
"epoch": 23.299748110831235,
"grad_norm": 70.37703704833984,
"learning_rate": 8.175580221997981e-06,
"loss": 68.5219,
"step": 55500
},
{
"epoch": 23.509655751469353,
"grad_norm": 71.59619140625,
"learning_rate": 7.166498486377397e-06,
"loss": 68.3648,
"step": 56000
},
{
"epoch": 23.719563392107474,
"grad_norm": 69.95408630371094,
"learning_rate": 6.157416750756812e-06,
"loss": 68.3101,
"step": 56500
},
{
"epoch": 23.929471032745592,
"grad_norm": 71.7118148803711,
"learning_rate": 5.148335015136227e-06,
"loss": 68.1746,
"step": 57000
},
{
"epoch": 24.13937867338371,
"grad_norm": 67.84393310546875,
"learning_rate": 4.139253279515641e-06,
"loss": 67.9893,
"step": 57500
},
{
"epoch": 24.349286314021832,
"grad_norm": 70.03734588623047,
"learning_rate": 3.130171543895056e-06,
"loss": 67.9278,
"step": 58000
},
{
"epoch": 24.55919395465995,
"grad_norm": 67.93866729736328,
"learning_rate": 2.1210898082744702e-06,
"loss": 67.8202,
"step": 58500
},
{
"epoch": 24.769101595298068,
"grad_norm": 61.62685775756836,
"learning_rate": 1.112008072653885e-06,
"loss": 67.7249,
"step": 59000
},
{
"epoch": 24.97900923593619,
"grad_norm": 63.67091751098633,
"learning_rate": 1.0292633703329971e-07,
"loss": 67.6809,
"step": 59500
},
{
"epoch": 25.0,
"step": 59550,
"total_flos": 7.577162542158336e+17,
"train_loss": 301.8068682845954,
"train_runtime": 49685.0337,
"train_samples_per_second": 115.044,
"train_steps_per_second": 1.199
}
],
"logging_steps": 500,
"max_steps": 59550,
"num_input_tokens_seen": 0,
"num_train_epochs": 25,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.577162542158336e+17,
"train_batch_size": 96,
"trial_name": null,
"trial_params": null
}