{
"best_global_step": 5000,
"best_metric": 4.457382838598836,
"best_model_checkpoint": "./SALAMA_C3/checkpoint-5000",
"epoch": 8.375209380234505,
"eval_steps": 500,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03350083752093802,
"grad_norm": 11.757207870483398,
"learning_rate": 6.333333333333334e-07,
"loss": 0.5112,
"step": 20
},
{
"epoch": 0.06700167504187604,
"grad_norm": 10.197953224182129,
"learning_rate": 1.3e-06,
"loss": 0.5283,
"step": 40
},
{
"epoch": 0.10050251256281408,
"grad_norm": 12.811074256896973,
"learning_rate": 1.9666666666666668e-06,
"loss": 0.474,
"step": 60
},
{
"epoch": 0.13400335008375208,
"grad_norm": 8.559342384338379,
"learning_rate": 2.6e-06,
"loss": 0.424,
"step": 80
},
{
"epoch": 0.16750418760469013,
"grad_norm": 6.9629225730896,
"learning_rate": 3.266666666666667e-06,
"loss": 0.4465,
"step": 100
},
{
"epoch": 0.20100502512562815,
"grad_norm": 7.833058834075928,
"learning_rate": 3.9333333333333335e-06,
"loss": 0.3764,
"step": 120
},
{
"epoch": 0.23450586264656617,
"grad_norm": 6.882424831390381,
"learning_rate": 4.600000000000001e-06,
"loss": 0.3705,
"step": 140
},
{
"epoch": 0.26800670016750416,
"grad_norm": 5.4183244705200195,
"learning_rate": 5.2666666666666665e-06,
"loss": 0.4116,
"step": 160
},
{
"epoch": 0.3015075376884422,
"grad_norm": 6.103787899017334,
"learning_rate": 5.933333333333335e-06,
"loss": 0.3903,
"step": 180
},
{
"epoch": 0.33500837520938026,
"grad_norm": 6.084160804748535,
"learning_rate": 6.600000000000001e-06,
"loss": 0.3794,
"step": 200
},
{
"epoch": 0.3685092127303183,
"grad_norm": 4.531406879425049,
"learning_rate": 7.266666666666668e-06,
"loss": 0.3904,
"step": 220
},
{
"epoch": 0.4020100502512563,
"grad_norm": 6.702854156494141,
"learning_rate": 7.933333333333334e-06,
"loss": 0.3672,
"step": 240
},
{
"epoch": 0.4355108877721943,
"grad_norm": 6.386377811431885,
"learning_rate": 8.6e-06,
"loss": 0.3379,
"step": 260
},
{
"epoch": 0.46901172529313234,
"grad_norm": 5.33281135559082,
"learning_rate": 9.266666666666667e-06,
"loss": 0.4102,
"step": 280
},
{
"epoch": 0.5025125628140703,
"grad_norm": 6.031435012817383,
"learning_rate": 9.933333333333334e-06,
"loss": 0.3815,
"step": 300
},
{
"epoch": 0.5360134003350083,
"grad_norm": 4.8638176918029785,
"learning_rate": 9.968253968253969e-06,
"loss": 0.358,
"step": 320
},
{
"epoch": 0.5695142378559463,
"grad_norm": 5.352113723754883,
"learning_rate": 9.932980599647268e-06,
"loss": 0.3578,
"step": 340
},
{
"epoch": 0.6030150753768844,
"grad_norm": 5.560739040374756,
"learning_rate": 9.897707231040565e-06,
"loss": 0.3795,
"step": 360
},
{
"epoch": 0.6365159128978225,
"grad_norm": 6.186940670013428,
"learning_rate": 9.862433862433864e-06,
"loss": 0.3852,
"step": 380
},
{
"epoch": 0.6700167504187605,
"grad_norm": 5.37507438659668,
"learning_rate": 9.827160493827161e-06,
"loss": 0.3761,
"step": 400
},
{
"epoch": 0.7035175879396985,
"grad_norm": 6.373449802398682,
"learning_rate": 9.79188712522046e-06,
"loss": 0.3674,
"step": 420
},
{
"epoch": 0.7370184254606366,
"grad_norm": 5.702625274658203,
"learning_rate": 9.756613756613757e-06,
"loss": 0.4013,
"step": 440
},
{
"epoch": 0.7705192629815746,
"grad_norm": 5.5675153732299805,
"learning_rate": 9.721340388007056e-06,
"loss": 0.3495,
"step": 460
},
{
"epoch": 0.8040201005025126,
"grad_norm": 6.296374320983887,
"learning_rate": 9.686067019400353e-06,
"loss": 0.3704,
"step": 480
},
{
"epoch": 0.8375209380234506,
"grad_norm": 4.830463886260986,
"learning_rate": 9.650793650793652e-06,
"loss": 0.36,
"step": 500
},
{
"epoch": 0.8375209380234506,
"eval_loss": 0.27764827013015747,
"eval_runtime": 1752.3826,
"eval_samples_per_second": 2.725,
"eval_steps_per_second": 0.341,
"eval_wer": 19.59291781998664,
"step": 500
},
{
"epoch": 0.8710217755443886,
"grad_norm": 4.863058090209961,
"learning_rate": 9.61552028218695e-06,
"loss": 0.3874,
"step": 520
},
{
"epoch": 0.9045226130653267,
"grad_norm": 6.121027946472168,
"learning_rate": 9.580246913580248e-06,
"loss": 0.382,
"step": 540
},
{
"epoch": 0.9380234505862647,
"grad_norm": 4.538082599639893,
"learning_rate": 9.544973544973546e-06,
"loss": 0.3611,
"step": 560
},
{
"epoch": 0.9715242881072027,
"grad_norm": 4.8772382736206055,
"learning_rate": 9.509700176366844e-06,
"loss": 0.3584,
"step": 580
},
{
"epoch": 1.0050251256281406,
"grad_norm": 4.039212703704834,
"learning_rate": 9.474426807760142e-06,
"loss": 0.3701,
"step": 600
},
{
"epoch": 1.0385259631490786,
"grad_norm": 4.787687301635742,
"learning_rate": 9.43915343915344e-06,
"loss": 0.2058,
"step": 620
},
{
"epoch": 1.0720268006700167,
"grad_norm": 4.503021717071533,
"learning_rate": 9.403880070546738e-06,
"loss": 0.2455,
"step": 640
},
{
"epoch": 1.1055276381909547,
"grad_norm": 6.663857936859131,
"learning_rate": 9.368606701940036e-06,
"loss": 0.2179,
"step": 660
},
{
"epoch": 1.1390284757118927,
"grad_norm": 4.486196041107178,
"learning_rate": 9.333333333333334e-06,
"loss": 0.2196,
"step": 680
},
{
"epoch": 1.1725293132328307,
"grad_norm": 4.667060852050781,
"learning_rate": 9.298059964726633e-06,
"loss": 0.2183,
"step": 700
},
{
"epoch": 1.2060301507537687,
"grad_norm": 5.608316898345947,
"learning_rate": 9.26278659611993e-06,
"loss": 0.2161,
"step": 720
},
{
"epoch": 1.2395309882747068,
"grad_norm": 4.2184271812438965,
"learning_rate": 9.227513227513229e-06,
"loss": 0.2382,
"step": 740
},
{
"epoch": 1.2730318257956448,
"grad_norm": 3.871945381164551,
"learning_rate": 9.192239858906526e-06,
"loss": 0.2214,
"step": 760
},
{
"epoch": 1.3065326633165828,
"grad_norm": 3.730222225189209,
"learning_rate": 9.156966490299825e-06,
"loss": 0.2213,
"step": 780
},
{
"epoch": 1.3400335008375208,
"grad_norm": 4.740777015686035,
"learning_rate": 9.121693121693122e-06,
"loss": 0.2503,
"step": 800
},
{
"epoch": 1.3735343383584588,
"grad_norm": 4.123469352722168,
"learning_rate": 9.086419753086421e-06,
"loss": 0.2443,
"step": 820
},
{
"epoch": 1.4070351758793969,
"grad_norm": 3.988917827606201,
"learning_rate": 9.051146384479718e-06,
"loss": 0.241,
"step": 840
},
{
"epoch": 1.4405360134003349,
"grad_norm": 5.4730305671691895,
"learning_rate": 9.015873015873017e-06,
"loss": 0.2163,
"step": 860
},
{
"epoch": 1.474036850921273,
"grad_norm": 5.020652770996094,
"learning_rate": 8.980599647266314e-06,
"loss": 0.2387,
"step": 880
},
{
"epoch": 1.507537688442211,
"grad_norm": 4.231486797332764,
"learning_rate": 8.945326278659613e-06,
"loss": 0.2523,
"step": 900
},
{
"epoch": 1.541038525963149,
"grad_norm": 6.197975158691406,
"learning_rate": 8.910052910052912e-06,
"loss": 0.2255,
"step": 920
},
{
"epoch": 1.574539363484087,
"grad_norm": 5.489210605621338,
"learning_rate": 8.874779541446209e-06,
"loss": 0.2397,
"step": 940
},
{
"epoch": 1.608040201005025,
"grad_norm": 4.32737398147583,
"learning_rate": 8.839506172839508e-06,
"loss": 0.2298,
"step": 960
},
{
"epoch": 1.641541038525963,
"grad_norm": 4.504214763641357,
"learning_rate": 8.804232804232805e-06,
"loss": 0.2301,
"step": 980
},
{
"epoch": 1.675041876046901,
"grad_norm": 3.9694759845733643,
"learning_rate": 8.768959435626104e-06,
"loss": 0.2338,
"step": 1000
},
{
"epoch": 1.675041876046901,
"eval_loss": 0.18838582932949066,
"eval_runtime": 1770.3449,
"eval_samples_per_second": 2.698,
"eval_steps_per_second": 0.337,
"eval_wer": 14.071299035983584,
"step": 1000
},
{
"epoch": 1.708542713567839,
"grad_norm": 4.972934722900391,
"learning_rate": 8.7336860670194e-06,
"loss": 0.2388,
"step": 1020
},
{
"epoch": 1.742043551088777,
"grad_norm": 5.178994655609131,
"learning_rate": 8.6984126984127e-06,
"loss": 0.2531,
"step": 1040
},
{
"epoch": 1.775544388609715,
"grad_norm": 3.560372829437256,
"learning_rate": 8.663139329805997e-06,
"loss": 0.2187,
"step": 1060
},
{
"epoch": 1.809045226130653,
"grad_norm": 4.647324562072754,
"learning_rate": 8.627865961199296e-06,
"loss": 0.2413,
"step": 1080
},
{
"epoch": 1.8425460636515911,
"grad_norm": 3.6784422397613525,
"learning_rate": 8.592592592592593e-06,
"loss": 0.2242,
"step": 1100
},
{
"epoch": 1.8760469011725294,
"grad_norm": 5.928018093109131,
"learning_rate": 8.557319223985891e-06,
"loss": 0.2406,
"step": 1120
},
{
"epoch": 1.9095477386934674,
"grad_norm": 6.358487606048584,
"learning_rate": 8.52204585537919e-06,
"loss": 0.2482,
"step": 1140
},
{
"epoch": 1.9430485762144054,
"grad_norm": 4.409506797790527,
"learning_rate": 8.486772486772487e-06,
"loss": 0.2415,
"step": 1160
},
{
"epoch": 1.9765494137353434,
"grad_norm": 5.169639587402344,
"learning_rate": 8.451499118165786e-06,
"loss": 0.222,
"step": 1180
},
{
"epoch": 2.0100502512562812,
"grad_norm": 2.8841328620910645,
"learning_rate": 8.416225749559083e-06,
"loss": 0.2109,
"step": 1200
},
{
"epoch": 2.0435510887772192,
"grad_norm": 3.835498332977295,
"learning_rate": 8.380952380952382e-06,
"loss": 0.1284,
"step": 1220
},
{
"epoch": 2.0770519262981573,
"grad_norm": 2.584859848022461,
"learning_rate": 8.34567901234568e-06,
"loss": 0.1059,
"step": 1240
},
{
"epoch": 2.1105527638190953,
"grad_norm": 2.797682285308838,
"learning_rate": 8.310405643738978e-06,
"loss": 0.0967,
"step": 1260
},
{
"epoch": 2.1440536013400333,
"grad_norm": 3.2937309741973877,
"learning_rate": 8.275132275132275e-06,
"loss": 0.1267,
"step": 1280
},
{
"epoch": 2.1775544388609713,
"grad_norm": 3.344325065612793,
"learning_rate": 8.239858906525574e-06,
"loss": 0.1354,
"step": 1300
},
{
"epoch": 2.2110552763819094,
"grad_norm": 2.955522060394287,
"learning_rate": 8.204585537918873e-06,
"loss": 0.104,
"step": 1320
},
{
"epoch": 2.2445561139028474,
"grad_norm": 5.180908203125,
"learning_rate": 8.16931216931217e-06,
"loss": 0.1229,
"step": 1340
},
{
"epoch": 2.2780569514237854,
"grad_norm": 3.639417886734009,
"learning_rate": 8.134038800705469e-06,
"loss": 0.104,
"step": 1360
},
{
"epoch": 2.3115577889447234,
"grad_norm": 3.7380902767181396,
"learning_rate": 8.098765432098766e-06,
"loss": 0.1141,
"step": 1380
},
{
"epoch": 2.3450586264656614,
"grad_norm": 4.100318908691406,
"learning_rate": 8.063492063492065e-06,
"loss": 0.1123,
"step": 1400
},
{
"epoch": 2.3785594639865995,
"grad_norm": 2.8147048950195312,
"learning_rate": 8.028218694885362e-06,
"loss": 0.1039,
"step": 1420
},
{
"epoch": 2.4120603015075375,
"grad_norm": 3.7376410961151123,
"learning_rate": 7.99294532627866e-06,
"loss": 0.1179,
"step": 1440
},
{
"epoch": 2.4455611390284755,
"grad_norm": 4.320065975189209,
"learning_rate": 7.957671957671958e-06,
"loss": 0.1167,
"step": 1460
},
{
"epoch": 2.4790619765494135,
"grad_norm": 4.415127277374268,
"learning_rate": 7.922398589065257e-06,
"loss": 0.125,
"step": 1480
},
{
"epoch": 2.5125628140703515,
"grad_norm": 2.631763219833374,
"learning_rate": 7.887125220458554e-06,
"loss": 0.1188,
"step": 1500
},
{
"epoch": 2.5125628140703515,
"eval_loss": 0.13635103404521942,
"eval_runtime": 1806.1099,
"eval_samples_per_second": 2.644,
"eval_steps_per_second": 0.331,
"eval_wer": 11.713753937195762,
"step": 1500
},
{
"epoch": 2.5460636515912896,
"grad_norm": 3.608773946762085,
"learning_rate": 7.851851851851853e-06,
"loss": 0.1222,
"step": 1520
},
{
"epoch": 2.5795644891122276,
"grad_norm": 3.4494569301605225,
"learning_rate": 7.816578483245151e-06,
"loss": 0.1087,
"step": 1540
},
{
"epoch": 2.6130653266331656,
"grad_norm": 3.6112048625946045,
"learning_rate": 7.781305114638449e-06,
"loss": 0.1097,
"step": 1560
},
{
"epoch": 2.6465661641541036,
"grad_norm": 3.1978707313537598,
"learning_rate": 7.746031746031747e-06,
"loss": 0.1101,
"step": 1580
},
{
"epoch": 2.6800670016750416,
"grad_norm": 3.6376733779907227,
"learning_rate": 7.710758377425045e-06,
"loss": 0.1056,
"step": 1600
},
{
"epoch": 2.7135678391959797,
"grad_norm": 3.754915237426758,
"learning_rate": 7.675485008818343e-06,
"loss": 0.1103,
"step": 1620
},
{
"epoch": 2.7470686767169177,
"grad_norm": 3.329240322113037,
"learning_rate": 7.64021164021164e-06,
"loss": 0.1193,
"step": 1640
},
{
"epoch": 2.7805695142378557,
"grad_norm": 3.9300169944763184,
"learning_rate": 7.604938271604939e-06,
"loss": 0.1232,
"step": 1660
},
{
"epoch": 2.8140703517587937,
"grad_norm": 4.290626049041748,
"learning_rate": 7.569664902998237e-06,
"loss": 0.1374,
"step": 1680
},
{
"epoch": 2.8475711892797317,
"grad_norm": 3.8987998962402344,
"learning_rate": 7.534391534391535e-06,
"loss": 0.1062,
"step": 1700
},
{
"epoch": 2.8810720268006698,
"grad_norm": 4.234960079193115,
"learning_rate": 7.499118165784833e-06,
"loss": 0.111,
"step": 1720
},
{
"epoch": 2.914572864321608,
"grad_norm": 3.103458881378174,
"learning_rate": 7.463844797178131e-06,
"loss": 0.1076,
"step": 1740
},
{
"epoch": 2.948073701842546,
"grad_norm": 3.270204782485962,
"learning_rate": 7.428571428571429e-06,
"loss": 0.1224,
"step": 1760
},
{
"epoch": 2.981574539363484,
"grad_norm": 4.261337757110596,
"learning_rate": 7.393298059964727e-06,
"loss": 0.1224,
"step": 1780
},
{
"epoch": 3.0150753768844223,
"grad_norm": 1.9531301259994507,
"learning_rate": 7.358024691358025e-06,
"loss": 0.0913,
"step": 1800
},
{
"epoch": 3.0485762144053603,
"grad_norm": 1.866215467453003,
"learning_rate": 7.322751322751324e-06,
"loss": 0.0466,
"step": 1820
},
{
"epoch": 3.0820770519262983,
"grad_norm": 3.9456610679626465,
"learning_rate": 7.287477954144622e-06,
"loss": 0.0451,
"step": 1840
},
{
"epoch": 3.1155778894472363,
"grad_norm": 1.9518849849700928,
"learning_rate": 7.25220458553792e-06,
"loss": 0.0414,
"step": 1860
},
{
"epoch": 3.1490787269681744,
"grad_norm": 2.232792854309082,
"learning_rate": 7.216931216931218e-06,
"loss": 0.0502,
"step": 1880
},
{
"epoch": 3.1825795644891124,
"grad_norm": 2.074127674102783,
"learning_rate": 7.181657848324516e-06,
"loss": 0.057,
"step": 1900
},
{
"epoch": 3.2160804020100504,
"grad_norm": 3.5036733150482178,
"learning_rate": 7.146384479717814e-06,
"loss": 0.0515,
"step": 1920
},
{
"epoch": 3.2495812395309884,
"grad_norm": 2.324014186859131,
"learning_rate": 7.111111111111112e-06,
"loss": 0.0454,
"step": 1940
},
{
"epoch": 3.2830820770519265,
"grad_norm": 2.710326671600342,
"learning_rate": 7.07583774250441e-06,
"loss": 0.0473,
"step": 1960
},
{
"epoch": 3.3165829145728645,
"grad_norm": 2.8943896293640137,
"learning_rate": 7.040564373897708e-06,
"loss": 0.046,
"step": 1980
},
{
"epoch": 3.3500837520938025,
"grad_norm": 2.319986581802368,
"learning_rate": 7.005291005291006e-06,
"loss": 0.0449,
"step": 2000
},
{
"epoch": 3.3500837520938025,
"eval_loss": 0.10934468358755112,
"eval_runtime": 1762.4506,
"eval_samples_per_second": 2.71,
"eval_steps_per_second": 0.339,
"eval_wer": 7.172854824854443,
"step": 2000
},
{
"epoch": 3.3835845896147405,
"grad_norm": 3.317129135131836,
"learning_rate": 6.9700176366843046e-06,
"loss": 0.0645,
"step": 2020
},
{
"epoch": 3.4170854271356785,
"grad_norm": 1.9533768892288208,
"learning_rate": 6.9347442680776025e-06,
"loss": 0.0432,
"step": 2040
},
{
"epoch": 3.4505862646566166,
"grad_norm": 2.0508453845977783,
"learning_rate": 6.8994708994709005e-06,
"loss": 0.0521,
"step": 2060
},
{
"epoch": 3.4840871021775546,
"grad_norm": 2.163236141204834,
"learning_rate": 6.8641975308641985e-06,
"loss": 0.0529,
"step": 2080
},
{
"epoch": 3.5175879396984926,
"grad_norm": 2.7154581546783447,
"learning_rate": 6.8289241622574965e-06,
"loss": 0.0452,
"step": 2100
},
{
"epoch": 3.5510887772194306,
"grad_norm": 3.0822432041168213,
"learning_rate": 6.7936507936507944e-06,
"loss": 0.0546,
"step": 2120
},
{
"epoch": 3.5845896147403686,
"grad_norm": 4.19010591506958,
"learning_rate": 6.758377425044092e-06,
"loss": 0.0529,
"step": 2140
},
{
"epoch": 3.6180904522613067,
"grad_norm": 2.9883594512939453,
"learning_rate": 6.72310405643739e-06,
"loss": 0.0503,
"step": 2160
},
{
"epoch": 3.6515912897822447,
"grad_norm": 2.3664371967315674,
"learning_rate": 6.687830687830688e-06,
"loss": 0.0498,
"step": 2180
},
{
"epoch": 3.6850921273031827,
"grad_norm": 2.0549991130828857,
"learning_rate": 6.652557319223986e-06,
"loss": 0.051,
"step": 2200
},
{
"epoch": 3.7185929648241207,
"grad_norm": 2.5339038372039795,
"learning_rate": 6.617283950617285e-06,
"loss": 0.0568,
"step": 2220
},
{
"epoch": 3.7520938023450587,
"grad_norm": 1.9988099336624146,
"learning_rate": 6.582010582010583e-06,
"loss": 0.051,
"step": 2240
},
{
"epoch": 3.7855946398659968,
"grad_norm": 2.5243782997131348,
"learning_rate": 6.546737213403881e-06,
"loss": 0.056,
"step": 2260
},
{
"epoch": 3.819095477386935,
"grad_norm": 3.157158136367798,
"learning_rate": 6.511463844797179e-06,
"loss": 0.0497,
"step": 2280
},
{
"epoch": 3.852596314907873,
"grad_norm": 1.9286202192306519,
"learning_rate": 6.476190476190477e-06,
"loss": 0.0426,
"step": 2300
},
{
"epoch": 3.886097152428811,
"grad_norm": 3.808802604675293,
"learning_rate": 6.440917107583775e-06,
"loss": 0.0499,
"step": 2320
},
{
"epoch": 3.919597989949749,
"grad_norm": 2.506671667098999,
"learning_rate": 6.405643738977073e-06,
"loss": 0.052,
"step": 2340
},
{
"epoch": 3.953098827470687,
"grad_norm": 2.9451920986175537,
"learning_rate": 6.370370370370371e-06,
"loss": 0.0552,
"step": 2360
},
{
"epoch": 3.986599664991625,
"grad_norm": 2.592744827270508,
"learning_rate": 6.335097001763669e-06,
"loss": 0.0527,
"step": 2380
},
{
"epoch": 4.0201005025125625,
"grad_norm": 1.8891575336456299,
"learning_rate": 6.299823633156967e-06,
"loss": 0.0289,
"step": 2400
},
{
"epoch": 4.0536013400335005,
"grad_norm": 1.8053243160247803,
"learning_rate": 6.264550264550266e-06,
"loss": 0.0192,
"step": 2420
},
{
"epoch": 4.0871021775544385,
"grad_norm": 2.0084407329559326,
"learning_rate": 6.229276895943564e-06,
"loss": 0.0242,
"step": 2440
},
{
"epoch": 4.1206030150753765,
"grad_norm": 1.5919119119644165,
"learning_rate": 6.194003527336862e-06,
"loss": 0.0211,
"step": 2460
},
{
"epoch": 4.1541038525963145,
"grad_norm": 1.9214613437652588,
"learning_rate": 6.15873015873016e-06,
"loss": 0.0233,
"step": 2480
},
{
"epoch": 4.187604690117253,
"grad_norm": 1.2652311325073242,
"learning_rate": 6.123456790123458e-06,
"loss": 0.0199,
"step": 2500
},
{
"epoch": 4.187604690117253,
"eval_loss": 0.0981329157948494,
"eval_runtime": 1779.0213,
"eval_samples_per_second": 2.685,
"eval_steps_per_second": 0.336,
"eval_wer": 6.707549871146321,
"step": 2500
},
{
"epoch": 4.221105527638191,
"grad_norm": 2.026528835296631,
"learning_rate": 6.088183421516756e-06,
"loss": 0.0217,
"step": 2520
},
{
"epoch": 4.254606365159129,
"grad_norm": 1.596919059753418,
"learning_rate": 6.052910052910054e-06,
"loss": 0.0167,
"step": 2540
},
{
"epoch": 4.288107202680067,
"grad_norm": 2.9445090293884277,
"learning_rate": 6.017636684303352e-06,
"loss": 0.0225,
"step": 2560
},
{
"epoch": 4.321608040201005,
"grad_norm": 2.4160282611846924,
"learning_rate": 5.9823633156966496e-06,
"loss": 0.0253,
"step": 2580
},
{
"epoch": 4.355108877721943,
"grad_norm": 1.461127758026123,
"learning_rate": 5.9470899470899475e-06,
"loss": 0.0197,
"step": 2600
},
{
"epoch": 4.388609715242881,
"grad_norm": 2.7892863750457764,
"learning_rate": 5.911816578483246e-06,
"loss": 0.022,
"step": 2620
},
{
"epoch": 4.422110552763819,
"grad_norm": 1.651208758354187,
"learning_rate": 5.876543209876544e-06,
"loss": 0.0215,
"step": 2640
},
{
"epoch": 4.455611390284757,
"grad_norm": 2.2500391006469727,
"learning_rate": 5.841269841269842e-06,
"loss": 0.0247,
"step": 2660
},
{
"epoch": 4.489112227805695,
"grad_norm": 4.447635173797607,
"learning_rate": 5.80599647266314e-06,
"loss": 0.0263,
"step": 2680
},
{
"epoch": 4.522613065326633,
"grad_norm": 0.8300407528877258,
"learning_rate": 5.770723104056438e-06,
"loss": 0.0209,
"step": 2700
},
{
"epoch": 4.556113902847571,
"grad_norm": 1.6874111890792847,
"learning_rate": 5.735449735449736e-06,
"loss": 0.0195,
"step": 2720
},
{
"epoch": 4.589614740368509,
"grad_norm": 2.4045815467834473,
"learning_rate": 5.700176366843034e-06,
"loss": 0.0224,
"step": 2740
},
{
"epoch": 4.623115577889447,
"grad_norm": 2.3160908222198486,
"learning_rate": 5.664902998236332e-06,
"loss": 0.0179,
"step": 2760
},
{
"epoch": 4.656616415410385,
"grad_norm": 1.6684287786483765,
"learning_rate": 5.62962962962963e-06,
"loss": 0.0238,
"step": 2780
},
{
"epoch": 4.690117252931323,
"grad_norm": 1.973906397819519,
"learning_rate": 5.594356261022928e-06,
"loss": 0.0226,
"step": 2800
},
{
"epoch": 4.723618090452261,
"grad_norm": 2.270906686782837,
"learning_rate": 5.559082892416227e-06,
"loss": 0.0232,
"step": 2820
},
{
"epoch": 4.757118927973199,
"grad_norm": 1.8875011205673218,
"learning_rate": 5.523809523809525e-06,
"loss": 0.0218,
"step": 2840
},
{
"epoch": 4.790619765494137,
"grad_norm": 1.1312583684921265,
"learning_rate": 5.488536155202823e-06,
"loss": 0.0212,
"step": 2860
},
{
"epoch": 4.824120603015075,
"grad_norm": 0.864783525466919,
"learning_rate": 5.453262786596121e-06,
"loss": 0.023,
"step": 2880
},
{
"epoch": 4.857621440536013,
"grad_norm": 1.2935965061187744,
"learning_rate": 5.417989417989419e-06,
"loss": 0.019,
"step": 2900
},
{
"epoch": 4.891122278056951,
"grad_norm": 2.4576382637023926,
"learning_rate": 5.382716049382717e-06,
"loss": 0.0193,
"step": 2920
},
{
"epoch": 4.924623115577889,
"grad_norm": 2.71472430229187,
"learning_rate": 5.347442680776015e-06,
"loss": 0.0253,
"step": 2940
},
{
"epoch": 4.958123953098827,
"grad_norm": 2.84940505027771,
"learning_rate": 5.312169312169313e-06,
"loss": 0.0218,
"step": 2960
},
{
"epoch": 4.991624790619765,
"grad_norm": 1.8483999967575073,
"learning_rate": 5.276895943562611e-06,
"loss": 0.0226,
"step": 2980
},
{
"epoch": 5.025125628140704,
"grad_norm": 0.6126876473426819,
"learning_rate": 5.241622574955909e-06,
"loss": 0.0101,
"step": 3000
},
{
"epoch": 5.025125628140704,
"eval_loss": 0.09390027821063995,
"eval_runtime": 1767.4512,
"eval_samples_per_second": 2.702,
"eval_steps_per_second": 0.338,
"eval_wer": 5.500143170754987,
"step": 3000
},
{
"epoch": 5.058626465661642,
"grad_norm": 0.3711394965648651,
"learning_rate": 5.2063492063492076e-06,
"loss": 0.0119,
"step": 3020
},
{
"epoch": 5.09212730318258,
"grad_norm": 0.5738839507102966,
"learning_rate": 5.1710758377425055e-06,
"loss": 0.0086,
"step": 3040
},
{
"epoch": 5.125628140703517,
"grad_norm": 0.7609245777130127,
"learning_rate": 5.1358024691358035e-06,
"loss": 0.0093,
"step": 3060
},
{
"epoch": 5.159128978224456,
"grad_norm": 1.2764722108840942,
"learning_rate": 5.1005291005291015e-06,
"loss": 0.0111,
"step": 3080
},
{
"epoch": 5.192629815745394,
"grad_norm": 2.1169776916503906,
"learning_rate": 5.0652557319223995e-06,
"loss": 0.0107,
"step": 3100
},
{
"epoch": 5.226130653266332,
"grad_norm": 2.1893081665039062,
"learning_rate": 5.0299823633156974e-06,
"loss": 0.0097,
"step": 3120
},
{
"epoch": 5.259631490787269,
"grad_norm": 2.2419638633728027,
"learning_rate": 4.9947089947089946e-06,
"loss": 0.0098,
"step": 3140
},
{
"epoch": 5.293132328308207,
"grad_norm": 0.6479611992835999,
"learning_rate": 4.959435626102293e-06,
"loss": 0.0082,
"step": 3160
},
{
"epoch": 5.326633165829146,
"grad_norm": 0.4799642860889435,
"learning_rate": 4.924162257495591e-06,
"loss": 0.0119,
"step": 3180
},
{
"epoch": 5.360134003350084,
"grad_norm": 0.7716453075408936,
"learning_rate": 4.888888888888889e-06,
"loss": 0.0086,
"step": 3200
},
{
"epoch": 5.393634840871022,
"grad_norm": 1.2303547859191895,
"learning_rate": 4.853615520282187e-06,
"loss": 0.009,
"step": 3220
},
{
"epoch": 5.42713567839196,
"grad_norm": 0.6345349550247192,
"learning_rate": 4.818342151675485e-06,
"loss": 0.0085,
"step": 3240
},
{
"epoch": 5.460636515912898,
"grad_norm": 0.9741530418395996,
"learning_rate": 4.783068783068783e-06,
"loss": 0.0082,
"step": 3260
},
{
"epoch": 5.494137353433836,
"grad_norm": 1.1631624698638916,
"learning_rate": 4.747795414462081e-06,
"loss": 0.0086,
"step": 3280
},
{
"epoch": 5.527638190954773,
"grad_norm": 0.6502953767776489,
"learning_rate": 4.712522045855379e-06,
"loss": 0.0093,
"step": 3300
},
{
"epoch": 5.561139028475711,
"grad_norm": 0.7464337348937988,
"learning_rate": 4.677248677248677e-06,
"loss": 0.0086,
"step": 3320
},
{
"epoch": 5.594639865996649,
"grad_norm": 1.017751693725586,
"learning_rate": 4.641975308641975e-06,
"loss": 0.0091,
"step": 3340
},
{
"epoch": 5.628140703517588,
"grad_norm": 0.4273395836353302,
"learning_rate": 4.606701940035274e-06,
"loss": 0.0078,
"step": 3360
},
{
"epoch": 5.661641541038526,
"grad_norm": 0.6737497448921204,
"learning_rate": 4.571428571428572e-06,
"loss": 0.0106,
"step": 3380
},
{
"epoch": 5.695142378559464,
"grad_norm": 1.0791343450546265,
"learning_rate": 4.53615520282187e-06,
"loss": 0.0097,
"step": 3400
},
{
"epoch": 5.728643216080402,
"grad_norm": 1.0891772508621216,
"learning_rate": 4.500881834215168e-06,
"loss": 0.0109,
"step": 3420
},
{
"epoch": 5.76214405360134,
"grad_norm": 0.7465157508850098,
"learning_rate": 4.465608465608466e-06,
"loss": 0.0078,
"step": 3440
},
{
"epoch": 5.795644891122278,
"grad_norm": 0.7693866491317749,
"learning_rate": 4.430335097001764e-06,
"loss": 0.009,
"step": 3460
},
{
"epoch": 5.8291457286432165,
"grad_norm": 1.3295698165893555,
"learning_rate": 4.395061728395062e-06,
"loss": 0.016,
"step": 3480
},
{
"epoch": 5.8626465661641545,
"grad_norm": 2.4605352878570557,
"learning_rate": 4.35978835978836e-06,
"loss": 0.0081,
"step": 3500
},
{
"epoch": 5.8626465661641545,
"eval_loss": 0.09377142041921616,
"eval_runtime": 1773.01,
"eval_samples_per_second": 2.694,
"eval_steps_per_second": 0.337,
"eval_wer": 5.010976424549012,
"step": 3500
},
{
"epoch": 5.8961474036850925,
"grad_norm": 0.8250058889389038,
"learning_rate": 4.324514991181658e-06,
"loss": 0.0108,
"step": 3520
},
{
"epoch": 5.9296482412060305,
"grad_norm": 1.3606537580490112,
"learning_rate": 4.289241622574956e-06,
"loss": 0.0125,
"step": 3540
},
{
"epoch": 5.9631490787269685,
"grad_norm": 0.6893450021743774,
"learning_rate": 4.2539682539682546e-06,
"loss": 0.0101,
"step": 3560
},
{
"epoch": 5.9966499162479066,
"grad_norm": 0.8129726052284241,
"learning_rate": 4.2186948853615525e-06,
"loss": 0.0095,
"step": 3580
},
{
"epoch": 6.030150753768845,
"grad_norm": 0.5319514274597168,
"learning_rate": 4.1834215167548505e-06,
"loss": 0.0051,
"step": 3600
},
{
"epoch": 6.063651591289783,
"grad_norm": 0.20368462800979614,
"learning_rate": 4.1481481481481485e-06,
"loss": 0.0049,
"step": 3620
},
{
"epoch": 6.097152428810721,
"grad_norm": 1.1721038818359375,
"learning_rate": 4.1128747795414465e-06,
"loss": 0.0041,
"step": 3640
},
{
"epoch": 6.130653266331659,
"grad_norm": 0.17129285633563995,
"learning_rate": 4.0776014109347444e-06,
"loss": 0.0055,
"step": 3660
},
{
"epoch": 6.164154103852597,
"grad_norm": 0.31987234950065613,
"learning_rate": 4.042328042328042e-06,
"loss": 0.0037,
"step": 3680
},
{
"epoch": 6.197654941373535,
"grad_norm": 0.3214021921157837,
"learning_rate": 4.00705467372134e-06,
"loss": 0.0052,
"step": 3700
},
{
"epoch": 6.231155778894473,
"grad_norm": 1.3790876865386963,
"learning_rate": 3.971781305114638e-06,
"loss": 0.0053,
"step": 3720
},
{
"epoch": 6.264656616415411,
"grad_norm": 0.549566924571991,
"learning_rate": 3.936507936507936e-06,
"loss": 0.0048,
"step": 3740
},
{
"epoch": 6.298157453936349,
"grad_norm": 0.2458494007587433,
"learning_rate": 3.901234567901235e-06,
"loss": 0.006,
"step": 3760
},
{
"epoch": 6.331658291457287,
"grad_norm": 2.3661324977874756,
"learning_rate": 3.865961199294533e-06,
"loss": 0.0046,
"step": 3780
},
{
"epoch": 6.365159128978225,
"grad_norm": 0.7839031219482422,
"learning_rate": 3.830687830687831e-06,
"loss": 0.0059,
"step": 3800
},
{
"epoch": 6.398659966499163,
"grad_norm": 0.2562466263771057,
"learning_rate": 3.795414462081129e-06,
"loss": 0.0038,
"step": 3820
},
{
"epoch": 6.432160804020101,
"grad_norm": 0.9680606126785278,
"learning_rate": 3.760141093474427e-06,
"loss": 0.0053,
"step": 3840
},
{
"epoch": 6.465661641541039,
"grad_norm": 0.5647270083427429,
"learning_rate": 3.724867724867725e-06,
"loss": 0.0049,
"step": 3860
},
{
"epoch": 6.499162479061977,
"grad_norm": 0.5850030183792114,
"learning_rate": 3.689594356261023e-06,
"loss": 0.0039,
"step": 3880
},
{
"epoch": 6.532663316582915,
"grad_norm": 0.1745942085981369,
"learning_rate": 3.654320987654321e-06,
"loss": 0.0035,
"step": 3900
},
{
"epoch": 6.566164154103853,
"grad_norm": 0.2170235961675644,
"learning_rate": 3.6190476190476194e-06,
"loss": 0.0045,
"step": 3920
},
{
"epoch": 6.599664991624791,
"grad_norm": 0.30363383889198303,
"learning_rate": 3.5837742504409174e-06,
"loss": 0.0031,
"step": 3940
},
{
"epoch": 6.633165829145729,
"grad_norm": 0.22252851724624634,
"learning_rate": 3.5485008818342153e-06,
"loss": 0.004,
"step": 3960
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.9289886951446533,
"learning_rate": 3.5132275132275133e-06,
"loss": 0.0055,
"step": 3980
},
{
"epoch": 6.700167504187605,
"grad_norm": 1.094429612159729,
"learning_rate": 3.4779541446208113e-06,
"loss": 0.0041,
"step": 4000
},
{
"epoch": 6.700167504187605,
"eval_loss": 0.09379494935274124,
"eval_runtime": 1761.7753,
"eval_samples_per_second": 2.711,
"eval_steps_per_second": 0.339,
"eval_wer": 4.89166746205975,
"step": 4000
},
{
"epoch": 6.733668341708543,
"grad_norm": 0.8961842656135559,
"learning_rate": 3.4426807760141097e-06,
"loss": 0.0034,
"step": 4020
},
{
"epoch": 6.767169179229481,
"grad_norm": 0.45882654190063477,
"learning_rate": 3.4074074074074077e-06,
"loss": 0.0054,
"step": 4040
},
{
"epoch": 6.800670016750419,
"grad_norm": 0.27223679423332214,
"learning_rate": 3.3721340388007056e-06,
"loss": 0.0034,
"step": 4060
},
{
"epoch": 6.834170854271357,
"grad_norm": 1.3323800563812256,
"learning_rate": 3.3368606701940036e-06,
"loss": 0.0055,
"step": 4080
},
{
"epoch": 6.867671691792295,
"grad_norm": 0.24382267892360687,
"learning_rate": 3.3015873015873016e-06,
"loss": 0.0038,
"step": 4100
},
{
"epoch": 6.901172529313233,
"grad_norm": 0.2211693376302719,
"learning_rate": 3.2663139329806e-06,
"loss": 0.0043,
"step": 4120
},
{
"epoch": 6.934673366834171,
"grad_norm": 0.20270536839962006,
"learning_rate": 3.231040564373898e-06,
"loss": 0.0047,
"step": 4140
},
{
"epoch": 6.968174204355109,
"grad_norm": 2.0083487033843994,
"learning_rate": 3.195767195767196e-06,
"loss": 0.0054,
"step": 4160
},
{
"epoch": 7.001675041876047,
"grad_norm": 0.1442304253578186,
"learning_rate": 3.160493827160494e-06,
"loss": 0.005,
"step": 4180
},
{
"epoch": 7.035175879396985,
"grad_norm": 0.38336917757987976,
"learning_rate": 3.126984126984127e-06,
"loss": 0.0031,
"step": 4200
},
{
"epoch": 7.068676716917923,
"grad_norm": 0.13572201132774353,
"learning_rate": 3.0917107583774254e-06,
"loss": 0.0027,
"step": 4220
},
{
"epoch": 7.102177554438861,
"grad_norm": 1.8334097862243652,
"learning_rate": 3.0564373897707234e-06,
"loss": 0.0025,
"step": 4240
},
{
"epoch": 7.135678391959799,
"grad_norm": 1.0188051462173462,
"learning_rate": 3.0211640211640214e-06,
"loss": 0.0032,
"step": 4260
},
{
"epoch": 7.169179229480737,
"grad_norm": 0.13007956743240356,
"learning_rate": 2.9858906525573194e-06,
"loss": 0.0033,
"step": 4280
},
{
"epoch": 7.202680067001675,
"grad_norm": 0.07188330590724945,
"learning_rate": 2.9506172839506173e-06,
"loss": 0.0025,
"step": 4300
},
{
"epoch": 7.236180904522613,
"grad_norm": 0.2624744176864624,
"learning_rate": 2.9153439153439157e-06,
"loss": 0.0022,
"step": 4320
},
{
"epoch": 7.269681742043551,
"grad_norm": 0.1817062646150589,
"learning_rate": 2.8800705467372137e-06,
"loss": 0.0027,
"step": 4340
},
{
"epoch": 7.303182579564489,
"grad_norm": 0.23730872571468353,
"learning_rate": 2.8447971781305117e-06,
"loss": 0.0028,
"step": 4360
},
{
"epoch": 7.336683417085427,
"grad_norm": 0.44462695717811584,
"learning_rate": 2.8095238095238096e-06,
"loss": 0.0027,
"step": 4380
},
{
"epoch": 7.370184254606365,
"grad_norm": 0.16056309640407562,
"learning_rate": 2.7742504409171076e-06,
"loss": 0.0021,
"step": 4400
},
{
"epoch": 7.403685092127303,
"grad_norm": 0.1740381121635437,
"learning_rate": 2.738977072310406e-06,
"loss": 0.0021,
"step": 4420
},
{
"epoch": 7.437185929648241,
"grad_norm": 0.2567131519317627,
"learning_rate": 2.703703703703704e-06,
"loss": 0.0025,
"step": 4440
},
{
"epoch": 7.4706867671691795,
"grad_norm": 0.22708427906036377,
"learning_rate": 2.668430335097002e-06,
"loss": 0.0018,
"step": 4460
},
{
"epoch": 7.5041876046901175,
"grad_norm": 0.14704178273677826,
"learning_rate": 2.6331569664903e-06,
"loss": 0.0023,
"step": 4480
},
{
"epoch": 7.5376884422110555,
"grad_norm": 0.26313552260398865,
"learning_rate": 2.597883597883598e-06,
"loss": 0.0026,
"step": 4500
},
{
"epoch": 7.5376884422110555,
"eval_loss": 0.09553142637014389,
"eval_runtime": 1387.1198,
"eval_samples_per_second": 3.443,
"eval_steps_per_second": 0.43,
"eval_wer": 4.50510642359454,
"step": 4500
},
{
"epoch": 7.5711892797319935,
"grad_norm": 0.09787946194410324,
"learning_rate": 2.5626102292768963e-06,
"loss": 0.0022,
"step": 4520
},
{
"epoch": 7.6046901172529315,
"grad_norm": 0.10577196627855301,
"learning_rate": 2.5273368606701943e-06,
"loss": 0.0023,
"step": 4540
},
{
"epoch": 7.63819095477387,
"grad_norm": 0.11914186924695969,
"learning_rate": 2.4920634920634923e-06,
"loss": 0.0026,
"step": 4560
},
{
"epoch": 7.671691792294808,
"grad_norm": 0.6797345876693726,
"learning_rate": 2.4567901234567902e-06,
"loss": 0.0028,
"step": 4580
},
{
"epoch": 7.705192629815746,
"grad_norm": 0.11635720729827881,
"learning_rate": 2.4215167548500882e-06,
"loss": 0.0021,
"step": 4600
},
{
"epoch": 7.738693467336684,
"grad_norm": 0.0952395349740982,
"learning_rate": 2.3862433862433866e-06,
"loss": 0.0021,
"step": 4620
},
{
"epoch": 7.772194304857622,
"grad_norm": 0.11671995371580124,
"learning_rate": 2.3509700176366846e-06,
"loss": 0.0023,
"step": 4640
},
{
"epoch": 7.80569514237856,
"grad_norm": 0.10185588896274567,
"learning_rate": 2.3156966490299826e-06,
"loss": 0.003,
"step": 4660
},
{
"epoch": 7.839195979899498,
"grad_norm": 0.11902861297130585,
"learning_rate": 2.2804232804232805e-06,
"loss": 0.0021,
"step": 4680
},
{
"epoch": 7.872696817420436,
"grad_norm": 0.10562069714069366,
"learning_rate": 2.2451499118165785e-06,
"loss": 0.002,
"step": 4700
},
{
"epoch": 7.906197654941374,
"grad_norm": 0.1272854059934616,
"learning_rate": 2.209876543209877e-06,
"loss": 0.0024,
"step": 4720
},
{
"epoch": 7.939698492462312,
"grad_norm": 0.14873459935188293,
"learning_rate": 2.174603174603175e-06,
"loss": 0.0021,
"step": 4740
},
{
"epoch": 7.97319932998325,
"grad_norm": 0.09556525200605392,
"learning_rate": 2.139329805996473e-06,
"loss": 0.0021,
"step": 4760
},
{
"epoch": 8.006700167504187,
"grad_norm": 0.2545607388019562,
"learning_rate": 2.104056437389771e-06,
"loss": 0.0021,
"step": 4780
},
{
"epoch": 8.040201005025125,
"grad_norm": 0.09803763031959534,
"learning_rate": 2.068783068783069e-06,
"loss": 0.0017,
"step": 4800
},
{
"epoch": 8.073701842546063,
"grad_norm": 0.6409705281257629,
"learning_rate": 2.0335097001763672e-06,
"loss": 0.0019,
"step": 4820
},
{
"epoch": 8.107202680067001,
"grad_norm": 0.08693050593137741,
"learning_rate": 1.998236331569665e-06,
"loss": 0.0017,
"step": 4840
},
{
"epoch": 8.140703517587939,
"grad_norm": 0.09300371259450912,
"learning_rate": 1.962962962962963e-06,
"loss": 0.0022,
"step": 4860
},
{
"epoch": 8.174204355108877,
"grad_norm": 0.09886649250984192,
"learning_rate": 1.927689594356261e-06,
"loss": 0.0016,
"step": 4880
},
{
"epoch": 8.207705192629815,
"grad_norm": 0.14025329053401947,
"learning_rate": 1.8924162257495593e-06,
"loss": 0.0031,
"step": 4900
},
{
"epoch": 8.241206030150753,
"grad_norm": 0.06884673237800598,
"learning_rate": 1.8571428571428573e-06,
"loss": 0.0014,
"step": 4920
},
{
"epoch": 8.274706867671691,
"grad_norm": 0.08606573939323425,
"learning_rate": 1.8218694885361555e-06,
"loss": 0.0015,
"step": 4940
},
{
"epoch": 8.308207705192629,
"grad_norm": 0.5949509739875793,
"learning_rate": 1.7865961199294535e-06,
"loss": 0.0017,
"step": 4960
},
{
"epoch": 8.341708542713567,
"grad_norm": 0.07532797753810883,
"learning_rate": 1.7513227513227514e-06,
"loss": 0.0015,
"step": 4980
},
{
"epoch": 8.375209380234505,
"grad_norm": 0.07208308577537537,
"learning_rate": 1.7160493827160496e-06,
"loss": 0.0016,
"step": 5000
},
{
"epoch": 8.375209380234505,
"eval_loss": 0.0970502495765686,
"eval_runtime": 1406.3953,
"eval_samples_per_second": 3.396,
"eval_steps_per_second": 0.424,
"eval_wer": 4.457382838598836,
"step": 5000
}
],
"logging_steps": 20,
"max_steps": 5970,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.616673800159232e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}