{
"best_global_step": 1800,
"best_metric": 0.25532727229341157,
"best_model_checkpoint": "./SALAMA_NEW99/checkpoint-1800",
"epoch": 3.0,
"eval_steps": 600,
"global_step": 1839,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01631321370309951,
"grad_norm": 0.8075992465019226,
"learning_rate": 1.8e-07,
"loss": 0.0109,
"step": 10
},
{
"epoch": 0.03262642740619902,
"grad_norm": 1.4262839555740356,
"learning_rate": 3.8e-07,
"loss": 0.0094,
"step": 20
},
{
"epoch": 0.048939641109298535,
"grad_norm": 2.3042991161346436,
"learning_rate": 5.800000000000001e-07,
"loss": 0.0093,
"step": 30
},
{
"epoch": 0.06525285481239804,
"grad_norm": 0.546521782875061,
"learning_rate": 7.8e-07,
"loss": 0.0218,
"step": 40
},
{
"epoch": 0.08156606851549755,
"grad_norm": 2.1409292221069336,
"learning_rate": 9.800000000000001e-07,
"loss": 0.0077,
"step": 50
},
{
"epoch": 0.09787928221859707,
"grad_norm": 1.9533841609954834,
"learning_rate": 1.1800000000000001e-06,
"loss": 0.0061,
"step": 60
},
{
"epoch": 0.11419249592169657,
"grad_norm": 1.0335050821304321,
"learning_rate": 1.3800000000000001e-06,
"loss": 0.0245,
"step": 70
},
{
"epoch": 0.13050570962479607,
"grad_norm": 1.158021330833435,
"learning_rate": 1.5800000000000001e-06,
"loss": 0.009,
"step": 80
},
{
"epoch": 0.1468189233278956,
"grad_norm": 2.7232327461242676,
"learning_rate": 1.7800000000000001e-06,
"loss": 0.0197,
"step": 90
},
{
"epoch": 0.1631321370309951,
"grad_norm": 1.2299067974090576,
"learning_rate": 1.98e-06,
"loss": 0.007,
"step": 100
},
{
"epoch": 0.17944535073409462,
"grad_norm": 2.173428535461426,
"learning_rate": 2.1800000000000003e-06,
"loss": 0.0098,
"step": 110
},
{
"epoch": 0.19575856443719414,
"grad_norm": 1.4795992374420166,
"learning_rate": 2.38e-06,
"loss": 0.0098,
"step": 120
},
{
"epoch": 0.21207177814029363,
"grad_norm": 0.48007962107658386,
"learning_rate": 2.5800000000000003e-06,
"loss": 0.0258,
"step": 130
},
{
"epoch": 0.22838499184339314,
"grad_norm": 0.5283637642860413,
"learning_rate": 2.7800000000000005e-06,
"loss": 0.0078,
"step": 140
},
{
"epoch": 0.24469820554649266,
"grad_norm": 1.748868465423584,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.0083,
"step": 150
},
{
"epoch": 0.26101141924959215,
"grad_norm": 2.2862608432769775,
"learning_rate": 3.1800000000000005e-06,
"loss": 0.012,
"step": 160
},
{
"epoch": 0.27732463295269166,
"grad_norm": 1.9614684581756592,
"learning_rate": 3.3800000000000007e-06,
"loss": 0.0093,
"step": 170
},
{
"epoch": 0.2936378466557912,
"grad_norm": 0.2269970178604126,
"learning_rate": 3.58e-06,
"loss": 0.0079,
"step": 180
},
{
"epoch": 0.3099510603588907,
"grad_norm": 3.722224712371826,
"learning_rate": 3.7800000000000002e-06,
"loss": 0.0123,
"step": 190
},
{
"epoch": 0.3262642740619902,
"grad_norm": 1.0197768211364746,
"learning_rate": 3.980000000000001e-06,
"loss": 0.0126,
"step": 200
},
{
"epoch": 0.3425774877650897,
"grad_norm": 2.238046884536743,
"learning_rate": 4.18e-06,
"loss": 0.0155,
"step": 210
},
{
"epoch": 0.35889070146818924,
"grad_norm": 2.2540082931518555,
"learning_rate": 4.38e-06,
"loss": 0.0064,
"step": 220
},
{
"epoch": 0.37520391517128876,
"grad_norm": 4.272034168243408,
"learning_rate": 4.58e-06,
"loss": 0.0144,
"step": 230
},
{
"epoch": 0.3915171288743883,
"grad_norm": 1.297784447669983,
"learning_rate": 4.78e-06,
"loss": 0.0183,
"step": 240
},
{
"epoch": 0.4078303425774878,
"grad_norm": 1.1175105571746826,
"learning_rate": 4.980000000000001e-06,
"loss": 0.01,
"step": 250
},
{
"epoch": 0.42414355628058725,
"grad_norm": 0.989632248878479,
"learning_rate": 5.18e-06,
"loss": 0.0185,
"step": 260
},
{
"epoch": 0.44045676998368677,
"grad_norm": 2.2630860805511475,
"learning_rate": 5.380000000000001e-06,
"loss": 0.004,
"step": 270
},
{
"epoch": 0.4567699836867863,
"grad_norm": 3.4459376335144043,
"learning_rate": 5.580000000000001e-06,
"loss": 0.0338,
"step": 280
},
{
"epoch": 0.4730831973898858,
"grad_norm": 2.0962679386138916,
"learning_rate": 5.78e-06,
"loss": 0.0078,
"step": 290
},
{
"epoch": 0.4893964110929853,
"grad_norm": 1.109755277633667,
"learning_rate": 5.98e-06,
"loss": 0.0133,
"step": 300
},
{
"epoch": 0.5057096247960848,
"grad_norm": 1.8200187683105469,
"learning_rate": 6.18e-06,
"loss": 0.0128,
"step": 310
},
{
"epoch": 0.5220228384991843,
"grad_norm": 0.8147674798965454,
"learning_rate": 6.380000000000001e-06,
"loss": 0.0139,
"step": 320
},
{
"epoch": 0.5383360522022839,
"grad_norm": 2.6240575313568115,
"learning_rate": 6.5800000000000005e-06,
"loss": 0.0153,
"step": 330
},
{
"epoch": 0.5546492659053833,
"grad_norm": 0.49103260040283203,
"learning_rate": 6.780000000000001e-06,
"loss": 0.0119,
"step": 340
},
{
"epoch": 0.5709624796084829,
"grad_norm": 1.4433389902114868,
"learning_rate": 6.98e-06,
"loss": 0.0169,
"step": 350
},
{
"epoch": 0.5872756933115824,
"grad_norm": 1.6671804189682007,
"learning_rate": 7.180000000000001e-06,
"loss": 0.0103,
"step": 360
},
{
"epoch": 0.6035889070146819,
"grad_norm": 1.7344307899475098,
"learning_rate": 7.3800000000000005e-06,
"loss": 0.0234,
"step": 370
},
{
"epoch": 0.6199021207177814,
"grad_norm": 1.4996588230133057,
"learning_rate": 7.58e-06,
"loss": 0.0138,
"step": 380
},
{
"epoch": 0.636215334420881,
"grad_norm": 1.5783976316452026,
"learning_rate": 7.78e-06,
"loss": 0.0219,
"step": 390
},
{
"epoch": 0.6525285481239804,
"grad_norm": 0.5455562472343445,
"learning_rate": 7.980000000000002e-06,
"loss": 0.0113,
"step": 400
},
{
"epoch": 0.6688417618270799,
"grad_norm": 1.6277507543563843,
"learning_rate": 8.18e-06,
"loss": 0.018,
"step": 410
},
{
"epoch": 0.6851549755301795,
"grad_norm": 2.473850965499878,
"learning_rate": 8.380000000000001e-06,
"loss": 0.0224,
"step": 420
},
{
"epoch": 0.7014681892332789,
"grad_norm": 1.4439480304718018,
"learning_rate": 8.580000000000001e-06,
"loss": 0.0156,
"step": 430
},
{
"epoch": 0.7177814029363785,
"grad_norm": 0.7241881489753723,
"learning_rate": 8.78e-06,
"loss": 0.0131,
"step": 440
},
{
"epoch": 0.734094616639478,
"grad_norm": 2.1885428428649902,
"learning_rate": 8.98e-06,
"loss": 0.019,
"step": 450
},
{
"epoch": 0.7504078303425775,
"grad_norm": 2.702345371246338,
"learning_rate": 9.180000000000002e-06,
"loss": 0.0253,
"step": 460
},
{
"epoch": 0.766721044045677,
"grad_norm": 3.975011110305786,
"learning_rate": 9.38e-06,
"loss": 0.0107,
"step": 470
},
{
"epoch": 0.7830342577487766,
"grad_norm": 2.3299198150634766,
"learning_rate": 9.58e-06,
"loss": 0.0212,
"step": 480
},
{
"epoch": 0.799347471451876,
"grad_norm": 1.317580223083496,
"learning_rate": 9.780000000000001e-06,
"loss": 0.0227,
"step": 490
},
{
"epoch": 0.8156606851549756,
"grad_norm": 2.4023892879486084,
"learning_rate": 9.980000000000001e-06,
"loss": 0.0365,
"step": 500
},
{
"epoch": 0.831973898858075,
"grad_norm": 2.641399383544922,
"learning_rate": 9.932785660941001e-06,
"loss": 0.0138,
"step": 510
},
{
"epoch": 0.8482871125611745,
"grad_norm": 2.7177205085754395,
"learning_rate": 9.858103061986558e-06,
"loss": 0.0145,
"step": 520
},
{
"epoch": 0.8646003262642741,
"grad_norm": 1.9215168952941895,
"learning_rate": 9.783420463032114e-06,
"loss": 0.0305,
"step": 530
},
{
"epoch": 0.8809135399673735,
"grad_norm": 2.8683998584747314,
"learning_rate": 9.708737864077671e-06,
"loss": 0.0183,
"step": 540
},
{
"epoch": 0.8972267536704731,
"grad_norm": 1.9078762531280518,
"learning_rate": 9.634055265123227e-06,
"loss": 0.0235,
"step": 550
},
{
"epoch": 0.9135399673735726,
"grad_norm": 3.7367427349090576,
"learning_rate": 9.559372666168784e-06,
"loss": 0.0175,
"step": 560
},
{
"epoch": 0.9298531810766721,
"grad_norm": 1.7846318483352661,
"learning_rate": 9.48469006721434e-06,
"loss": 0.0138,
"step": 570
},
{
"epoch": 0.9461663947797716,
"grad_norm": 3.428025245666504,
"learning_rate": 9.410007468259897e-06,
"loss": 0.0168,
"step": 580
},
{
"epoch": 0.9624796084828712,
"grad_norm": 2.601677417755127,
"learning_rate": 9.335324869305452e-06,
"loss": 0.0192,
"step": 590
},
{
"epoch": 0.9787928221859706,
"grad_norm": 3.509207248687744,
"learning_rate": 9.26064227035101e-06,
"loss": 0.0177,
"step": 600
},
{
"epoch": 0.9787928221859706,
"eval_loss": 0.011165497824549675,
"eval_runtime": 1766.1438,
"eval_samples_per_second": 2.775,
"eval_steps_per_second": 0.347,
"eval_wer": 1.0189228529839884,
"step": 600
},
{
"epoch": 0.9951060358890701,
"grad_norm": 2.4465436935424805,
"learning_rate": 9.185959671396565e-06,
"loss": 0.0168,
"step": 610
},
{
"epoch": 1.0114192495921697,
"grad_norm": 3.492405414581299,
"learning_rate": 9.111277072442123e-06,
"loss": 0.0096,
"step": 620
},
{
"epoch": 1.0277324632952691,
"grad_norm": 0.9142910242080688,
"learning_rate": 9.036594473487678e-06,
"loss": 0.0106,
"step": 630
},
{
"epoch": 1.0440456769983686,
"grad_norm": 0.7352218627929688,
"learning_rate": 8.961911874533234e-06,
"loss": 0.0085,
"step": 640
},
{
"epoch": 1.0603588907014683,
"grad_norm": 1.1586534976959229,
"learning_rate": 8.887229275578791e-06,
"loss": 0.0068,
"step": 650
},
{
"epoch": 1.0766721044045677,
"grad_norm": 2.758241891860962,
"learning_rate": 8.812546676624347e-06,
"loss": 0.0084,
"step": 660
},
{
"epoch": 1.0929853181076672,
"grad_norm": 0.44431474804878235,
"learning_rate": 8.737864077669904e-06,
"loss": 0.0087,
"step": 670
},
{
"epoch": 1.1092985318107667,
"grad_norm": 1.2769174575805664,
"learning_rate": 8.66318147871546e-06,
"loss": 0.0096,
"step": 680
},
{
"epoch": 1.1256117455138663,
"grad_norm": 2.656780242919922,
"learning_rate": 8.588498879761017e-06,
"loss": 0.0127,
"step": 690
},
{
"epoch": 1.1419249592169658,
"grad_norm": 1.0895354747772217,
"learning_rate": 8.513816280806572e-06,
"loss": 0.0112,
"step": 700
},
{
"epoch": 1.1582381729200653,
"grad_norm": 1.0903260707855225,
"learning_rate": 8.43913368185213e-06,
"loss": 0.0091,
"step": 710
},
{
"epoch": 1.1745513866231647,
"grad_norm": 2.6359965801239014,
"learning_rate": 8.364451082897685e-06,
"loss": 0.0193,
"step": 720
},
{
"epoch": 1.1908646003262642,
"grad_norm": 0.9313237071037292,
"learning_rate": 8.289768483943242e-06,
"loss": 0.0073,
"step": 730
},
{
"epoch": 1.2071778140293639,
"grad_norm": 0.7475559115409851,
"learning_rate": 8.215085884988798e-06,
"loss": 0.0075,
"step": 740
},
{
"epoch": 1.2234910277324633,
"grad_norm": 0.24283719062805176,
"learning_rate": 8.140403286034355e-06,
"loss": 0.0071,
"step": 750
},
{
"epoch": 1.2398042414355628,
"grad_norm": 0.6300885677337646,
"learning_rate": 8.06572068707991e-06,
"loss": 0.006,
"step": 760
},
{
"epoch": 1.2561174551386622,
"grad_norm": 1.7239911556243896,
"learning_rate": 7.991038088125468e-06,
"loss": 0.0137,
"step": 770
},
{
"epoch": 1.272430668841762,
"grad_norm": 1.5679051876068115,
"learning_rate": 7.916355489171023e-06,
"loss": 0.0047,
"step": 780
},
{
"epoch": 1.2887438825448614,
"grad_norm": 1.0264155864715576,
"learning_rate": 7.84167289021658e-06,
"loss": 0.0074,
"step": 790
},
{
"epoch": 1.3050570962479608,
"grad_norm": 1.0627104043960571,
"learning_rate": 7.766990291262136e-06,
"loss": 0.0106,
"step": 800
},
{
"epoch": 1.3213703099510603,
"grad_norm": 1.262370228767395,
"learning_rate": 7.692307692307694e-06,
"loss": 0.0072,
"step": 810
},
{
"epoch": 1.3376835236541598,
"grad_norm": 1.5836150646209717,
"learning_rate": 7.617625093353249e-06,
"loss": 0.0092,
"step": 820
},
{
"epoch": 1.3539967373572595,
"grad_norm": 1.4232890605926514,
"learning_rate": 7.542942494398806e-06,
"loss": 0.0089,
"step": 830
},
{
"epoch": 1.370309951060359,
"grad_norm": 0.49621379375457764,
"learning_rate": 7.468259895444362e-06,
"loss": 0.0066,
"step": 840
},
{
"epoch": 1.3866231647634584,
"grad_norm": 1.060057520866394,
"learning_rate": 7.393577296489919e-06,
"loss": 0.0065,
"step": 850
},
{
"epoch": 1.4029363784665578,
"grad_norm": 1.5507824420928955,
"learning_rate": 7.318894697535475e-06,
"loss": 0.0086,
"step": 860
},
{
"epoch": 1.4192495921696575,
"grad_norm": 0.6949241757392883,
"learning_rate": 7.244212098581031e-06,
"loss": 0.0066,
"step": 870
},
{
"epoch": 1.435562805872757,
"grad_norm": 3.595614194869995,
"learning_rate": 7.169529499626588e-06,
"loss": 0.0084,
"step": 880
},
{
"epoch": 1.4518760195758564,
"grad_norm": 0.38814422488212585,
"learning_rate": 7.094846900672144e-06,
"loss": 0.0086,
"step": 890
},
{
"epoch": 1.468189233278956,
"grad_norm": 0.612360417842865,
"learning_rate": 7.0201643017177005e-06,
"loss": 0.0087,
"step": 900
},
{
"epoch": 1.4845024469820554,
"grad_norm": 1.359798789024353,
"learning_rate": 6.945481702763257e-06,
"loss": 0.0052,
"step": 910
},
{
"epoch": 1.5008156606851548,
"grad_norm": 2.678591251373291,
"learning_rate": 6.870799103808813e-06,
"loss": 0.0079,
"step": 920
},
{
"epoch": 1.5171288743882545,
"grad_norm": 1.1047519445419312,
"learning_rate": 6.79611650485437e-06,
"loss": 0.0047,
"step": 930
},
{
"epoch": 1.533442088091354,
"grad_norm": 1.798194169998169,
"learning_rate": 6.721433905899925e-06,
"loss": 0.0125,
"step": 940
},
{
"epoch": 1.5497553017944536,
"grad_norm": 0.44330868124961853,
"learning_rate": 6.6467513069454825e-06,
"loss": 0.0074,
"step": 950
},
{
"epoch": 1.566068515497553,
"grad_norm": 1.6538296937942505,
"learning_rate": 6.572068707991038e-06,
"loss": 0.0118,
"step": 960
},
{
"epoch": 1.5823817292006526,
"grad_norm": 1.2385334968566895,
"learning_rate": 6.497386109036595e-06,
"loss": 0.0091,
"step": 970
},
{
"epoch": 1.598694942903752,
"grad_norm": 1.1827936172485352,
"learning_rate": 6.422703510082151e-06,
"loss": 0.008,
"step": 980
},
{
"epoch": 1.6150081566068515,
"grad_norm": 1.1670842170715332,
"learning_rate": 6.348020911127708e-06,
"loss": 0.004,
"step": 990
},
{
"epoch": 1.631321370309951,
"grad_norm": 1.9073982238769531,
"learning_rate": 6.273338312173264e-06,
"loss": 0.0105,
"step": 1000
},
{
"epoch": 1.6476345840130504,
"grad_norm": 0.6385743021965027,
"learning_rate": 6.198655713218821e-06,
"loss": 0.0047,
"step": 1010
},
{
"epoch": 1.66394779771615,
"grad_norm": 0.717432975769043,
"learning_rate": 6.123973114264377e-06,
"loss": 0.0104,
"step": 1020
},
{
"epoch": 1.6802610114192496,
"grad_norm": 1.6229373216629028,
"learning_rate": 6.049290515309934e-06,
"loss": 0.014,
"step": 1030
},
{
"epoch": 1.6965742251223492,
"grad_norm": 1.5651880502700806,
"learning_rate": 5.974607916355489e-06,
"loss": 0.0081,
"step": 1040
},
{
"epoch": 1.7128874388254487,
"grad_norm": 1.444212794303894,
"learning_rate": 5.899925317401046e-06,
"loss": 0.0099,
"step": 1050
},
{
"epoch": 1.7292006525285482,
"grad_norm": 1.1551159620285034,
"learning_rate": 5.825242718446602e-06,
"loss": 0.0087,
"step": 1060
},
{
"epoch": 1.7455138662316476,
"grad_norm": 0.7630689740180969,
"learning_rate": 5.750560119492159e-06,
"loss": 0.0056,
"step": 1070
},
{
"epoch": 1.761827079934747,
"grad_norm": 0.6756051778793335,
"learning_rate": 5.675877520537715e-06,
"loss": 0.0057,
"step": 1080
},
{
"epoch": 1.7781402936378465,
"grad_norm": 0.47443750500679016,
"learning_rate": 5.6011949215832715e-06,
"loss": 0.0068,
"step": 1090
},
{
"epoch": 1.7944535073409462,
"grad_norm": 0.8193678855895996,
"learning_rate": 5.526512322628828e-06,
"loss": 0.0079,
"step": 1100
},
{
"epoch": 1.8107667210440457,
"grad_norm": 0.7489306330680847,
"learning_rate": 5.451829723674384e-06,
"loss": 0.0072,
"step": 1110
},
{
"epoch": 1.8270799347471451,
"grad_norm": 0.9458585977554321,
"learning_rate": 5.37714712471994e-06,
"loss": 0.0047,
"step": 1120
},
{
"epoch": 1.8433931484502448,
"grad_norm": 0.8624395132064819,
"learning_rate": 5.302464525765497e-06,
"loss": 0.0053,
"step": 1130
},
{
"epoch": 1.8597063621533443,
"grad_norm": 0.5177104473114014,
"learning_rate": 5.227781926811053e-06,
"loss": 0.0074,
"step": 1140
},
{
"epoch": 1.8760195758564437,
"grad_norm": 1.2944625616073608,
"learning_rate": 5.15309932785661e-06,
"loss": 0.0151,
"step": 1150
},
{
"epoch": 1.8923327895595432,
"grad_norm": 1.9204165935516357,
"learning_rate": 5.0784167289021655e-06,
"loss": 0.0082,
"step": 1160
},
{
"epoch": 1.9086460032626427,
"grad_norm": 2.2888307571411133,
"learning_rate": 5.003734129947723e-06,
"loss": 0.0057,
"step": 1170
},
{
"epoch": 1.9249592169657421,
"grad_norm": 1.0358986854553223,
"learning_rate": 4.929051530993279e-06,
"loss": 0.0087,
"step": 1180
},
{
"epoch": 1.9412724306688418,
"grad_norm": 0.8882740139961243,
"learning_rate": 4.854368932038836e-06,
"loss": 0.0111,
"step": 1190
},
{
"epoch": 1.9575856443719413,
"grad_norm": 1.2315634489059448,
"learning_rate": 4.779686333084392e-06,
"loss": 0.0055,
"step": 1200
},
{
"epoch": 1.9575856443719413,
"eval_loss": 0.005837247706949711,
"eval_runtime": 1757.0337,
"eval_samples_per_second": 2.789,
"eval_steps_per_second": 0.349,
"eval_wer": 0.4533848760350299,
"step": 1200
},
{
"epoch": 1.9738988580750407,
"grad_norm": 1.227731466293335,
"learning_rate": 4.7050037341299485e-06,
"loss": 0.0039,
"step": 1210
},
{
"epoch": 1.9902120717781404,
"grad_norm": 2.077256917953491,
"learning_rate": 4.630321135175505e-06,
"loss": 0.0054,
"step": 1220
},
{
"epoch": 2.00652528548124,
"grad_norm": 0.7639275789260864,
"learning_rate": 4.555638536221061e-06,
"loss": 0.0055,
"step": 1230
},
{
"epoch": 2.0228384991843393,
"grad_norm": 0.3696356415748596,
"learning_rate": 4.480955937266617e-06,
"loss": 0.0044,
"step": 1240
},
{
"epoch": 2.039151712887439,
"grad_norm": 0.4084968566894531,
"learning_rate": 4.406273338312173e-06,
"loss": 0.0037,
"step": 1250
},
{
"epoch": 2.0554649265905383,
"grad_norm": 0.19460849463939667,
"learning_rate": 4.33159073935773e-06,
"loss": 0.0023,
"step": 1260
},
{
"epoch": 2.0717781402936377,
"grad_norm": 0.293778657913208,
"learning_rate": 4.256908140403286e-06,
"loss": 0.0034,
"step": 1270
},
{
"epoch": 2.088091353996737,
"grad_norm": 0.0963997095823288,
"learning_rate": 4.1822255414488425e-06,
"loss": 0.0011,
"step": 1280
},
{
"epoch": 2.104404567699837,
"grad_norm": 0.1960829347372055,
"learning_rate": 4.107542942494399e-06,
"loss": 0.002,
"step": 1290
},
{
"epoch": 2.1207177814029365,
"grad_norm": 0.22539754211902618,
"learning_rate": 4.032860343539955e-06,
"loss": 0.0033,
"step": 1300
},
{
"epoch": 2.137030995106036,
"grad_norm": 0.43771710991859436,
"learning_rate": 3.958177744585512e-06,
"loss": 0.0023,
"step": 1310
},
{
"epoch": 2.1533442088091355,
"grad_norm": 0.39533042907714844,
"learning_rate": 3.883495145631068e-06,
"loss": 0.0024,
"step": 1320
},
{
"epoch": 2.169657422512235,
"grad_norm": 0.1653490662574768,
"learning_rate": 3.8088125466766246e-06,
"loss": 0.0022,
"step": 1330
},
{
"epoch": 2.1859706362153344,
"grad_norm": 0.07081114500761032,
"learning_rate": 3.734129947722181e-06,
"loss": 0.0019,
"step": 1340
},
{
"epoch": 2.202283849918434,
"grad_norm": 0.4014896750450134,
"learning_rate": 3.6594473487677374e-06,
"loss": 0.0051,
"step": 1350
},
{
"epoch": 2.2185970636215333,
"grad_norm": 0.23356999456882477,
"learning_rate": 3.584764749813294e-06,
"loss": 0.003,
"step": 1360
},
{
"epoch": 2.2349102773246328,
"grad_norm": 0.10898713022470474,
"learning_rate": 3.5100821508588502e-06,
"loss": 0.0024,
"step": 1370
},
{
"epoch": 2.2512234910277327,
"grad_norm": 0.08161570131778717,
"learning_rate": 3.4353995519044066e-06,
"loss": 0.005,
"step": 1380
},
{
"epoch": 2.267536704730832,
"grad_norm": 0.5079241991043091,
"learning_rate": 3.3607169529499626e-06,
"loss": 0.0047,
"step": 1390
},
{
"epoch": 2.2838499184339316,
"grad_norm": 0.9440802335739136,
"learning_rate": 3.286034353995519e-06,
"loss": 0.005,
"step": 1400
},
{
"epoch": 2.300163132137031,
"grad_norm": 0.3201453685760498,
"learning_rate": 3.2113517550410755e-06,
"loss": 0.0025,
"step": 1410
},
{
"epoch": 2.3164763458401305,
"grad_norm": 0.4434724748134613,
"learning_rate": 3.136669156086632e-06,
"loss": 0.0046,
"step": 1420
},
{
"epoch": 2.33278955954323,
"grad_norm": 0.30961957573890686,
"learning_rate": 3.0619865571321883e-06,
"loss": 0.0017,
"step": 1430
},
{
"epoch": 2.3491027732463294,
"grad_norm": 0.16197660565376282,
"learning_rate": 2.9873039581777447e-06,
"loss": 0.0028,
"step": 1440
},
{
"epoch": 2.365415986949429,
"grad_norm": 0.6526560187339783,
"learning_rate": 2.912621359223301e-06,
"loss": 0.0048,
"step": 1450
},
{
"epoch": 2.3817292006525284,
"grad_norm": 0.07773681730031967,
"learning_rate": 2.8379387602688575e-06,
"loss": 0.0016,
"step": 1460
},
{
"epoch": 2.3980424143556283,
"grad_norm": 0.9661878943443298,
"learning_rate": 2.763256161314414e-06,
"loss": 0.0061,
"step": 1470
},
{
"epoch": 2.4143556280587277,
"grad_norm": 1.0361403226852417,
"learning_rate": 2.68857356235997e-06,
"loss": 0.0079,
"step": 1480
},
{
"epoch": 2.430668841761827,
"grad_norm": 0.09448342025279999,
"learning_rate": 2.6138909634055264e-06,
"loss": 0.0019,
"step": 1490
},
{
"epoch": 2.4469820554649266,
"grad_norm": 0.06374044716358185,
"learning_rate": 2.5392083644510828e-06,
"loss": 0.0016,
"step": 1500
},
{
"epoch": 2.463295269168026,
"grad_norm": 0.5809449553489685,
"learning_rate": 2.4645257654966396e-06,
"loss": 0.0023,
"step": 1510
},
{
"epoch": 2.4796084828711256,
"grad_norm": 0.1248660460114479,
"learning_rate": 2.389843166542196e-06,
"loss": 0.0022,
"step": 1520
},
{
"epoch": 2.495921696574225,
"grad_norm": 0.06973249465227127,
"learning_rate": 2.3151605675877524e-06,
"loss": 0.003,
"step": 1530
},
{
"epoch": 2.5122349102773245,
"grad_norm": 4.4841694831848145,
"learning_rate": 2.2404779686333084e-06,
"loss": 0.0028,
"step": 1540
},
{
"epoch": 2.528548123980424,
"grad_norm": 0.12578125298023224,
"learning_rate": 2.165795369678865e-06,
"loss": 0.0018,
"step": 1550
},
{
"epoch": 2.544861337683524,
"grad_norm": 0.4544123113155365,
"learning_rate": 2.0911127707244213e-06,
"loss": 0.0019,
"step": 1560
},
{
"epoch": 2.5611745513866233,
"grad_norm": 0.19402466714382172,
"learning_rate": 2.0164301717699777e-06,
"loss": 0.0022,
"step": 1570
},
{
"epoch": 2.5774877650897228,
"grad_norm": 0.790777325630188,
"learning_rate": 1.941747572815534e-06,
"loss": 0.0014,
"step": 1580
},
{
"epoch": 2.5938009787928222,
"grad_norm": 0.07585720717906952,
"learning_rate": 1.8670649738610905e-06,
"loss": 0.0016,
"step": 1590
},
{
"epoch": 2.6101141924959217,
"grad_norm": 1.340345025062561,
"learning_rate": 1.792382374906647e-06,
"loss": 0.0037,
"step": 1600
},
{
"epoch": 2.626427406199021,
"grad_norm": 1.7285263538360596,
"learning_rate": 1.7176997759522033e-06,
"loss": 0.0033,
"step": 1610
},
{
"epoch": 2.6427406199021206,
"grad_norm": 0.09814145416021347,
"learning_rate": 1.6430171769977595e-06,
"loss": 0.0041,
"step": 1620
},
{
"epoch": 2.65905383360522,
"grad_norm": 0.13084454834461212,
"learning_rate": 1.568334578043316e-06,
"loss": 0.0019,
"step": 1630
},
{
"epoch": 2.6753670473083195,
"grad_norm": 0.07720234245061874,
"learning_rate": 1.4936519790888724e-06,
"loss": 0.0032,
"step": 1640
},
{
"epoch": 2.6916802610114194,
"grad_norm": 0.8523921966552734,
"learning_rate": 1.4189693801344288e-06,
"loss": 0.0018,
"step": 1650
},
{
"epoch": 2.707993474714519,
"grad_norm": 0.09405750036239624,
"learning_rate": 1.344286781179985e-06,
"loss": 0.0021,
"step": 1660
},
{
"epoch": 2.7243066884176184,
"grad_norm": 0.11995333433151245,
"learning_rate": 1.2696041822255414e-06,
"loss": 0.0032,
"step": 1670
},
{
"epoch": 2.740619902120718,
"grad_norm": 0.6443132162094116,
"learning_rate": 1.194921583271098e-06,
"loss": 0.002,
"step": 1680
},
{
"epoch": 2.7569331158238173,
"grad_norm": 0.29005166888237,
"learning_rate": 1.1202389843166542e-06,
"loss": 0.0035,
"step": 1690
},
{
"epoch": 2.7732463295269167,
"grad_norm": 0.6452990770339966,
"learning_rate": 1.0455563853622106e-06,
"loss": 0.0018,
"step": 1700
},
{
"epoch": 2.789559543230016,
"grad_norm": 0.10402486473321915,
"learning_rate": 9.70873786407767e-07,
"loss": 0.0041,
"step": 1710
},
{
"epoch": 2.8058727569331157,
"grad_norm": 0.11018693447113037,
"learning_rate": 8.961911874533235e-07,
"loss": 0.0016,
"step": 1720
},
{
"epoch": 2.822185970636215,
"grad_norm": 0.1631583273410797,
"learning_rate": 8.215085884988798e-07,
"loss": 0.0018,
"step": 1730
},
{
"epoch": 2.838499184339315,
"grad_norm": 1.892919898033142,
"learning_rate": 7.468259895444362e-07,
"loss": 0.0097,
"step": 1740
},
{
"epoch": 2.8548123980424145,
"grad_norm": 1.0992926359176636,
"learning_rate": 6.721433905899925e-07,
"loss": 0.003,
"step": 1750
},
{
"epoch": 2.871125611745514,
"grad_norm": 0.08139196038246155,
"learning_rate": 5.97460791635549e-07,
"loss": 0.0017,
"step": 1760
},
{
"epoch": 2.8874388254486134,
"grad_norm": 2.4018540382385254,
"learning_rate": 5.227781926811053e-07,
"loss": 0.002,
"step": 1770
},
{
"epoch": 2.903752039151713,
"grad_norm": 1.0262824296951294,
"learning_rate": 4.4809559372666173e-07,
"loss": 0.0024,
"step": 1780
},
{
"epoch": 2.9200652528548123,
"grad_norm": 1.249171257019043,
"learning_rate": 3.734129947722181e-07,
"loss": 0.0029,
"step": 1790
},
{
"epoch": 2.936378466557912,
"grad_norm": 0.15619555115699768,
"learning_rate": 2.987303958177745e-07,
"loss": 0.0019,
"step": 1800
},
{
"epoch": 2.936378466557912,
"eval_loss": 0.003976329229772091,
"eval_runtime": 1760.2683,
"eval_samples_per_second": 2.784,
"eval_steps_per_second": 0.348,
"eval_wer": 0.25532727229341157,
"step": 1800
},
{
"epoch": 2.9526916802610113,
"grad_norm": 0.38771194219589233,
"learning_rate": 2.2404779686333086e-07,
"loss": 0.0017,
"step": 1810
},
{
"epoch": 2.9690048939641107,
"grad_norm": 0.9996446371078491,
"learning_rate": 1.4936519790888725e-07,
"loss": 0.0017,
"step": 1820
},
{
"epoch": 2.9853181076672106,
"grad_norm": 0.1012411043047905,
"learning_rate": 7.468259895444363e-08,
"loss": 0.0031,
"step": 1830
}
],
"logging_steps": 10,
"max_steps": 1839,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 600,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.697141880741888e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}