{
"best_metric": 30.77792278066015,
"best_model_checkpoint": "./whisper-lora-15k-adapters/checkpoint-3792",
"epoch": 4.4402810304449645,
"eval_steps": 237,
"global_step": 3792,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02927400468384075,
"grad_norm": 0.578233540058136,
"learning_rate": 0.0005,
"loss": 0.99,
"step": 25
},
{
"epoch": 0.0585480093676815,
"grad_norm": 0.3136115074157715,
"learning_rate": 0.001,
"loss": 0.7869,
"step": 50
},
{
"epoch": 0.08782201405152225,
"grad_norm": 0.3556165397167206,
"learning_rate": 0.0009940758293838863,
"loss": 0.6559,
"step": 75
},
{
"epoch": 0.117096018735363,
"grad_norm": 0.43047836422920227,
"learning_rate": 0.0009881516587677726,
"loss": 0.6799,
"step": 100
},
{
"epoch": 0.14637002341920374,
"grad_norm": 0.41161859035491943,
"learning_rate": 0.0009822274881516586,
"loss": 0.6179,
"step": 125
},
{
"epoch": 0.1756440281030445,
"grad_norm": 0.3486640453338623,
"learning_rate": 0.000976303317535545,
"loss": 0.6218,
"step": 150
},
{
"epoch": 0.20491803278688525,
"grad_norm": 0.33961209654808044,
"learning_rate": 0.0009703791469194313,
"loss": 0.5623,
"step": 175
},
{
"epoch": 0.234192037470726,
"grad_norm": 0.4211539328098297,
"learning_rate": 0.0009644549763033176,
"loss": 0.6293,
"step": 200
},
{
"epoch": 0.26346604215456676,
"grad_norm": 0.4401342272758484,
"learning_rate": 0.0009585308056872039,
"loss": 0.65,
"step": 225
},
{
"epoch": 0.2775175644028103,
"eval_loss": 0.6182317733764648,
"eval_runtime": 12305.8898,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 42.11536326582399,
"step": 237
},
{
"epoch": 0.2927400468384075,
"grad_norm": 0.5222854614257812,
"learning_rate": 0.0009526066350710901,
"loss": 0.6494,
"step": 250
},
{
"epoch": 0.32201405152224827,
"grad_norm": 0.5739536285400391,
"learning_rate": 0.0009466824644549763,
"loss": 0.5656,
"step": 275
},
{
"epoch": 0.351288056206089,
"grad_norm": 0.4213266968727112,
"learning_rate": 0.0009407582938388626,
"loss": 0.5781,
"step": 300
},
{
"epoch": 0.3805620608899297,
"grad_norm": 0.5185717344284058,
"learning_rate": 0.0009348341232227489,
"loss": 0.5993,
"step": 325
},
{
"epoch": 0.4098360655737705,
"grad_norm": 0.41156110167503357,
"learning_rate": 0.0009289099526066352,
"loss": 0.5504,
"step": 350
},
{
"epoch": 0.43911007025761123,
"grad_norm": 0.44983068108558655,
"learning_rate": 0.0009229857819905212,
"loss": 0.642,
"step": 375
},
{
"epoch": 0.468384074941452,
"grad_norm": 0.7018289566040039,
"learning_rate": 0.0009170616113744075,
"loss": 0.6313,
"step": 400
},
{
"epoch": 0.49765807962529274,
"grad_norm": 0.41570019721984863,
"learning_rate": 0.0009111374407582938,
"loss": 0.642,
"step": 425
},
{
"epoch": 0.5269320843091335,
"grad_norm": 0.2906375229358673,
"learning_rate": 0.0009052132701421801,
"loss": 0.5501,
"step": 450
},
{
"epoch": 0.5550351288056206,
"eval_loss": 0.5893104076385498,
"eval_runtime": 12217.9829,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 39.71420868158533,
"step": 474
},
{
"epoch": 0.5562060889929742,
"grad_norm": 0.42602404952049255,
"learning_rate": 0.0008992890995260664,
"loss": 0.6419,
"step": 475
},
{
"epoch": 0.585480093676815,
"grad_norm": 0.45508912205696106,
"learning_rate": 0.0008933649289099525,
"loss": 0.5816,
"step": 500
},
{
"epoch": 0.6147540983606558,
"grad_norm": 0.5000929236412048,
"learning_rate": 0.0008874407582938388,
"loss": 0.6941,
"step": 525
},
{
"epoch": 0.6440281030444965,
"grad_norm": 0.4415169656276703,
"learning_rate": 0.0008815165876777251,
"loss": 0.5615,
"step": 550
},
{
"epoch": 0.6733021077283372,
"grad_norm": 0.5120753049850464,
"learning_rate": 0.0008755924170616114,
"loss": 0.559,
"step": 575
},
{
"epoch": 0.702576112412178,
"grad_norm": 0.3653784990310669,
"learning_rate": 0.0008696682464454977,
"loss": 0.5836,
"step": 600
},
{
"epoch": 0.7318501170960188,
"grad_norm": 0.5504665374755859,
"learning_rate": 0.0008637440758293838,
"loss": 0.6163,
"step": 625
},
{
"epoch": 0.7611241217798594,
"grad_norm": 0.49855440855026245,
"learning_rate": 0.0008578199052132701,
"loss": 0.5482,
"step": 650
},
{
"epoch": 0.7903981264637002,
"grad_norm": 0.3784034848213196,
"learning_rate": 0.0008518957345971564,
"loss": 0.5572,
"step": 675
},
{
"epoch": 0.819672131147541,
"grad_norm": 0.5111596584320068,
"learning_rate": 0.0008459715639810427,
"loss": 0.565,
"step": 700
},
{
"epoch": 0.832552693208431,
"eval_loss": 0.5823442339897156,
"eval_runtime": 12225.5714,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 37.68099852505035,
"step": 711
},
{
"epoch": 0.8489461358313818,
"grad_norm": 0.5943437218666077,
"learning_rate": 0.000840047393364929,
"loss": 0.5146,
"step": 725
},
{
"epoch": 0.8782201405152225,
"grad_norm": 0.5228826403617859,
"learning_rate": 0.0008341232227488151,
"loss": 0.5338,
"step": 750
},
{
"epoch": 0.9074941451990632,
"grad_norm": 0.44550982117652893,
"learning_rate": 0.0008281990521327014,
"loss": 0.5631,
"step": 775
},
{
"epoch": 0.936768149882904,
"grad_norm": 0.5326892733573914,
"learning_rate": 0.0008222748815165877,
"loss": 0.5489,
"step": 800
},
{
"epoch": 0.9660421545667447,
"grad_norm": 0.5083812475204468,
"learning_rate": 0.000816350710900474,
"loss": 0.5336,
"step": 825
},
{
"epoch": 0.9953161592505855,
"grad_norm": 0.4346718192100525,
"learning_rate": 0.0008104265402843603,
"loss": 0.6155,
"step": 850
},
{
"epoch": 1.0245901639344261,
"grad_norm": 0.4419436454772949,
"learning_rate": 0.0008045023696682464,
"loss": 0.5506,
"step": 875
},
{
"epoch": 1.053864168618267,
"grad_norm": 0.5935924649238586,
"learning_rate": 0.0007985781990521327,
"loss": 0.5407,
"step": 900
},
{
"epoch": 1.0831381733021077,
"grad_norm": 0.4228830635547638,
"learning_rate": 0.000792654028436019,
"loss": 0.5527,
"step": 925
},
{
"epoch": 1.1100702576112411,
"eval_loss": 0.5060898065567017,
"eval_runtime": 12332.7416,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 37.76065419091052,
"step": 948
},
{
"epoch": 1.1124121779859484,
"grad_norm": 0.37129494547843933,
"learning_rate": 0.0007867298578199053,
"loss": 0.5191,
"step": 950
},
{
"epoch": 1.1416861826697893,
"grad_norm": 0.7254778146743774,
"learning_rate": 0.0007808056872037916,
"loss": 0.5537,
"step": 975
},
{
"epoch": 1.17096018735363,
"grad_norm": 0.4878183603286743,
"learning_rate": 0.0007748815165876777,
"loss": 0.5281,
"step": 1000
},
{
"epoch": 1.2002341920374708,
"grad_norm": 0.35084593296051025,
"learning_rate": 0.000768957345971564,
"loss": 0.5166,
"step": 1025
},
{
"epoch": 1.2295081967213115,
"grad_norm": 0.5030648708343506,
"learning_rate": 0.0007630331753554502,
"loss": 0.5284,
"step": 1050
},
{
"epoch": 1.2587822014051522,
"grad_norm": 0.5004339218139648,
"learning_rate": 0.0007571090047393365,
"loss": 0.5695,
"step": 1075
},
{
"epoch": 1.288056206088993,
"grad_norm": 0.5789551734924316,
"learning_rate": 0.0007511848341232228,
"loss": 0.5511,
"step": 1100
},
{
"epoch": 1.3173302107728337,
"grad_norm": 0.389371782541275,
"learning_rate": 0.0007452606635071089,
"loss": 0.5661,
"step": 1125
},
{
"epoch": 1.3466042154566744,
"grad_norm": 0.38161447644233704,
"learning_rate": 0.0007393364928909952,
"loss": 0.5087,
"step": 1150
},
{
"epoch": 1.3758782201405153,
"grad_norm": 0.40263721346855164,
"learning_rate": 0.0007334123222748815,
"loss": 0.5147,
"step": 1175
},
{
"epoch": 1.3875878220140514,
"eval_loss": 0.5079419016838074,
"eval_runtime": 12322.9428,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 38.37238559521937,
"step": 1185
},
{
"epoch": 1.405152224824356,
"grad_norm": 0.5195249319076538,
"learning_rate": 0.0007274881516587678,
"loss": 0.5409,
"step": 1200
},
{
"epoch": 1.4344262295081966,
"grad_norm": 0.40098896622657776,
"learning_rate": 0.0007215639810426541,
"loss": 0.5121,
"step": 1225
},
{
"epoch": 1.4637002341920375,
"grad_norm": 0.42950162291526794,
"learning_rate": 0.0007156398104265402,
"loss": 0.5155,
"step": 1250
},
{
"epoch": 1.4929742388758782,
"grad_norm": 0.38044580817222595,
"learning_rate": 0.0007097156398104265,
"loss": 0.4727,
"step": 1275
},
{
"epoch": 1.5222482435597189,
"grad_norm": 0.38700923323631287,
"learning_rate": 0.0007037914691943128,
"loss": 0.498,
"step": 1300
},
{
"epoch": 1.5515222482435598,
"grad_norm": 0.4633864760398865,
"learning_rate": 0.0006978672985781991,
"loss": 0.5297,
"step": 1325
},
{
"epoch": 1.5807962529274004,
"grad_norm": 0.48980265855789185,
"learning_rate": 0.0006919431279620854,
"loss": 0.4732,
"step": 1350
},
{
"epoch": 1.6100702576112411,
"grad_norm": 0.3389205038547516,
"learning_rate": 0.0006860189573459715,
"loss": 0.5461,
"step": 1375
},
{
"epoch": 1.639344262295082,
"grad_norm": 0.3686542510986328,
"learning_rate": 0.0006800947867298578,
"loss": 0.5033,
"step": 1400
},
{
"epoch": 1.6651053864168617,
"eval_loss": 0.5103082060813904,
"eval_runtime": 12252.9597,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 39.455228053358894,
"step": 1422
},
{
"epoch": 1.6686182669789227,
"grad_norm": 0.46823108196258545,
"learning_rate": 0.0006741706161137441,
"loss": 0.5392,
"step": 1425
},
{
"epoch": 1.6978922716627634,
"grad_norm": 0.5638931393623352,
"learning_rate": 0.0006682464454976304,
"loss": 0.5253,
"step": 1450
},
{
"epoch": 1.7271662763466042,
"grad_norm": 0.5234322547912598,
"learning_rate": 0.0006623222748815167,
"loss": 0.51,
"step": 1475
},
{
"epoch": 1.756440281030445,
"grad_norm": 0.5467631816864014,
"learning_rate": 0.0006563981042654028,
"loss": 0.5436,
"step": 1500
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.3867318034172058,
"learning_rate": 0.0006504739336492891,
"loss": 0.5142,
"step": 1525
},
{
"epoch": 1.8149882903981265,
"grad_norm": 0.4091216027736664,
"learning_rate": 0.0006445497630331754,
"loss": 0.5345,
"step": 1550
},
{
"epoch": 1.8442622950819674,
"grad_norm": 0.44898247718811035,
"learning_rate": 0.0006386255924170617,
"loss": 0.4937,
"step": 1575
},
{
"epoch": 1.8735362997658078,
"grad_norm": 0.3484508991241455,
"learning_rate": 0.000632701421800948,
"loss": 0.489,
"step": 1600
},
{
"epoch": 1.9028103044496487,
"grad_norm": 0.5735388398170471,
"learning_rate": 0.0006267772511848341,
"loss": 0.4742,
"step": 1625
},
{
"epoch": 1.9320843091334896,
"grad_norm": 0.7618733048439026,
"learning_rate": 0.0006208530805687204,
"loss": 0.5559,
"step": 1650
},
{
"epoch": 1.9426229508196722,
"eval_loss": 0.5032439827919006,
"eval_runtime": 12233.7713,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 39.20600686955827,
"step": 1659
},
{
"epoch": 1.96135831381733,
"grad_norm": 0.3670201003551483,
"learning_rate": 0.0006149289099526067,
"loss": 0.4868,
"step": 1675
},
{
"epoch": 1.990632318501171,
"grad_norm": 0.4840170443058014,
"learning_rate": 0.000609004739336493,
"loss": 0.5458,
"step": 1700
},
{
"epoch": 2.019906323185012,
"grad_norm": 0.30357852578163147,
"learning_rate": 0.0006030805687203791,
"loss": 0.4845,
"step": 1725
},
{
"epoch": 2.0491803278688523,
"grad_norm": 0.43158742785453796,
"learning_rate": 0.0005971563981042653,
"loss": 0.5007,
"step": 1750
},
{
"epoch": 2.078454332552693,
"grad_norm": 0.46644917130470276,
"learning_rate": 0.0005912322274881516,
"loss": 0.4558,
"step": 1775
},
{
"epoch": 2.107728337236534,
"grad_norm": 0.42779088020324707,
"learning_rate": 0.0005853080568720379,
"loss": 0.4736,
"step": 1800
},
{
"epoch": 2.1370023419203745,
"grad_norm": 0.4596354067325592,
"learning_rate": 0.0005793838862559242,
"loss": 0.4338,
"step": 1825
},
{
"epoch": 2.1662763466042154,
"grad_norm": 0.5213513970375061,
"learning_rate": 0.0005734597156398104,
"loss": 0.4657,
"step": 1850
},
{
"epoch": 2.1955503512880563,
"grad_norm": 0.30604368448257446,
"learning_rate": 0.0005675355450236966,
"loss": 0.4697,
"step": 1875
},
{
"epoch": 2.2201405152224822,
"eval_loss": 0.49964743852615356,
"eval_runtime": 12221.7958,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 35.9517533349309,
"step": 1896
},
{
"epoch": 2.2248243559718968,
"grad_norm": 0.38965240120887756,
"learning_rate": 0.0005616113744075829,
"loss": 0.4203,
"step": 1900
},
{
"epoch": 2.2540983606557377,
"grad_norm": 0.5568481087684631,
"learning_rate": 0.0005556872037914692,
"loss": 0.4664,
"step": 1925
},
{
"epoch": 2.2833723653395785,
"grad_norm": 0.6357014179229736,
"learning_rate": 0.0005497630331753555,
"loss": 0.5052,
"step": 1950
},
{
"epoch": 2.312646370023419,
"grad_norm": 0.49635082483291626,
"learning_rate": 0.0005438388625592417,
"loss": 0.4611,
"step": 1975
},
{
"epoch": 2.34192037470726,
"grad_norm": 0.5938425660133362,
"learning_rate": 0.0005379146919431279,
"loss": 0.4882,
"step": 2000
},
{
"epoch": 2.371194379391101,
"grad_norm": 0.38019558787345886,
"learning_rate": 0.0005319905213270142,
"loss": 0.4349,
"step": 2025
},
{
"epoch": 2.4004683840749417,
"grad_norm": 0.3761730492115021,
"learning_rate": 0.0005260663507109005,
"loss": 0.4384,
"step": 2050
},
{
"epoch": 2.429742388758782,
"grad_norm": 0.3853297233581543,
"learning_rate": 0.0005201421800947868,
"loss": 0.4731,
"step": 2075
},
{
"epoch": 2.459016393442623,
"grad_norm": 0.45702874660491943,
"learning_rate": 0.000514218009478673,
"loss": 0.5495,
"step": 2100
},
{
"epoch": 2.4882903981264635,
"grad_norm": 0.4612327814102173,
"learning_rate": 0.0005082938388625592,
"loss": 0.5017,
"step": 2125
},
{
"epoch": 2.4976580796252925,
"eval_loss": 0.4605013132095337,
"eval_runtime": 12477.8005,
"eval_samples_per_second": 0.122,
"eval_steps_per_second": 0.008,
"eval_wer": 37.35402284018324,
"step": 2133
},
{
"epoch": 2.5175644028103044,
"grad_norm": 0.5223235487937927,
"learning_rate": 0.0005023696682464455,
"loss": 0.5064,
"step": 2150
},
{
"epoch": 2.5468384074941453,
"grad_norm": 0.531912624835968,
"learning_rate": 0.0004964454976303318,
"loss": 0.4461,
"step": 2175
},
{
"epoch": 2.576112412177986,
"grad_norm": 0.3550543487071991,
"learning_rate": 0.0004905213270142181,
"loss": 0.475,
"step": 2200
},
{
"epoch": 2.6053864168618266,
"grad_norm": 0.5227505564689636,
"learning_rate": 0.00048459715639810423,
"loss": 0.5369,
"step": 2225
},
{
"epoch": 2.6346604215456675,
"grad_norm": 0.4097291827201843,
"learning_rate": 0.0004786729857819905,
"loss": 0.4568,
"step": 2250
},
{
"epoch": 2.663934426229508,
"grad_norm": 0.3728583753108978,
"learning_rate": 0.0004727488151658768,
"loss": 0.4736,
"step": 2275
},
{
"epoch": 2.693208430913349,
"grad_norm": 0.5369985699653625,
"learning_rate": 0.000466824644549763,
"loss": 0.4443,
"step": 2300
},
{
"epoch": 2.7224824355971897,
"grad_norm": 0.5444718599319458,
"learning_rate": 0.0004609004739336493,
"loss": 0.485,
"step": 2325
},
{
"epoch": 2.7517564402810306,
"grad_norm": 0.36326608061790466,
"learning_rate": 0.00045497630331753553,
"loss": 0.4285,
"step": 2350
},
{
"epoch": 2.775175644028103,
"eval_loss": 0.4545816481113434,
"eval_runtime": 12525.3573,
"eval_samples_per_second": 0.121,
"eval_steps_per_second": 0.008,
"eval_wer": 37.59597393380218,
"step": 2370
},
{
"epoch": 2.781030444964871,
"grad_norm": 0.3528901934623718,
"learning_rate": 0.0004490521327014218,
"loss": 0.4291,
"step": 2375
},
{
"epoch": 2.810304449648712,
"grad_norm": 0.25132936239242554,
"learning_rate": 0.0004431279620853081,
"loss": 0.4559,
"step": 2400
},
{
"epoch": 2.839578454332553,
"grad_norm": 0.3684830367565155,
"learning_rate": 0.0004372037914691943,
"loss": 0.3786,
"step": 2425
},
{
"epoch": 2.8688524590163933,
"grad_norm": 0.4008779227733612,
"learning_rate": 0.0004312796208530806,
"loss": 0.398,
"step": 2450
},
{
"epoch": 2.898126463700234,
"grad_norm": 0.4344983696937561,
"learning_rate": 0.00042535545023696683,
"loss": 0.4301,
"step": 2475
},
{
"epoch": 2.927400468384075,
"grad_norm": 0.6371504664421082,
"learning_rate": 0.0004194312796208531,
"loss": 0.4736,
"step": 2500
},
{
"epoch": 2.9566744730679155,
"grad_norm": 0.44420474767684937,
"learning_rate": 0.0004135071090047394,
"loss": 0.4088,
"step": 2525
},
{
"epoch": 2.9859484777517564,
"grad_norm": 0.3830738067626953,
"learning_rate": 0.00040758293838862557,
"loss": 0.423,
"step": 2550
},
{
"epoch": 3.0152224824355973,
"grad_norm": 0.4638129770755768,
"learning_rate": 0.00040165876777251185,
"loss": 0.3978,
"step": 2575
},
{
"epoch": 3.0444964871194378,
"grad_norm": 0.43738076090812683,
"learning_rate": 0.0003957345971563981,
"loss": 0.4102,
"step": 2600
},
{
"epoch": 3.0526932084309135,
"eval_loss": 0.45073771476745605,
"eval_runtime": 10045.043,
"eval_samples_per_second": 0.151,
"eval_steps_per_second": 0.009,
"eval_wer": 34.176755639038404,
"step": 2607
},
{
"epoch": 3.0737704918032787,
"grad_norm": 0.5374141335487366,
"learning_rate": 0.00038981042654028436,
"loss": 0.4432,
"step": 2625
},
{
"epoch": 3.1030444964871196,
"grad_norm": 0.4544264078140259,
"learning_rate": 0.00038388625592417064,
"loss": 0.4027,
"step": 2650
},
{
"epoch": 3.13231850117096,
"grad_norm": 0.37046387791633606,
"learning_rate": 0.00037796208530805687,
"loss": 0.3846,
"step": 2675
},
{
"epoch": 3.161592505854801,
"grad_norm": 0.44554048776626587,
"learning_rate": 0.00037203791469194315,
"loss": 0.4104,
"step": 2700
},
{
"epoch": 3.190866510538642,
"grad_norm": 0.551729679107666,
"learning_rate": 0.0003661137440758294,
"loss": 0.3826,
"step": 2725
},
{
"epoch": 3.2201405152224822,
"grad_norm": 0.4279089868068695,
"learning_rate": 0.00036018957345971566,
"loss": 0.3984,
"step": 2750
},
{
"epoch": 3.249414519906323,
"grad_norm": 0.505104660987854,
"learning_rate": 0.00035426540284360194,
"loss": 0.3973,
"step": 2775
},
{
"epoch": 3.278688524590164,
"grad_norm": 0.4596370458602905,
"learning_rate": 0.00034834123222748817,
"loss": 0.4953,
"step": 2800
},
{
"epoch": 3.307962529274005,
"grad_norm": 0.40555649995803833,
"learning_rate": 0.00034241706161137445,
"loss": 0.4195,
"step": 2825
},
{
"epoch": 3.330210772833724,
"eval_loss": 0.45748990774154663,
"eval_runtime": 10069.353,
"eval_samples_per_second": 0.151,
"eval_steps_per_second": 0.009,
"eval_wer": 36.828563761779236,
"step": 2844
},
{
"epoch": 3.3372365339578454,
"grad_norm": 0.44486892223358154,
"learning_rate": 0.0003364928909952606,
"loss": 0.4271,
"step": 2850
},
{
"epoch": 3.3665105386416863,
"grad_norm": 0.5062658786773682,
"learning_rate": 0.0003305687203791469,
"loss": 0.4694,
"step": 2875
},
{
"epoch": 3.3957845433255267,
"grad_norm": 0.4567612409591675,
"learning_rate": 0.0003246445497630332,
"loss": 0.4907,
"step": 2900
},
{
"epoch": 3.4250585480093676,
"grad_norm": 0.2982103228569031,
"learning_rate": 0.0003187203791469194,
"loss": 0.4649,
"step": 2925
},
{
"epoch": 3.4543325526932085,
"grad_norm": 0.39093852043151855,
"learning_rate": 0.0003127962085308057,
"loss": 0.3721,
"step": 2950
},
{
"epoch": 3.4836065573770494,
"grad_norm": 0.49913489818573,
"learning_rate": 0.0003068720379146919,
"loss": 0.3926,
"step": 2975
},
{
"epoch": 3.51288056206089,
"grad_norm": 0.4530438482761383,
"learning_rate": 0.0003009478672985782,
"loss": 0.4016,
"step": 3000
},
{
"epoch": 3.5421545667447307,
"grad_norm": 0.5188443064689636,
"learning_rate": 0.0002950236966824645,
"loss": 0.4079,
"step": 3025
},
{
"epoch": 3.571428571428571,
"grad_norm": 0.632804274559021,
"learning_rate": 0.0002890995260663507,
"loss": 0.4173,
"step": 3050
},
{
"epoch": 3.600702576112412,
"grad_norm": 0.38659924268722534,
"learning_rate": 0.000283175355450237,
"loss": 0.4724,
"step": 3075
},
{
"epoch": 3.607728337236534,
"eval_loss": 0.45248714089393616,
"eval_runtime": 9993.3519,
"eval_samples_per_second": 0.152,
"eval_steps_per_second": 0.01,
"eval_wer": 33.128660047669406,
"step": 3081
},
{
"epoch": 3.629976580796253,
"grad_norm": 0.4062461256980896,
"learning_rate": 0.0002772511848341232,
"loss": 0.3773,
"step": 3100
},
{
"epoch": 3.659250585480094,
"grad_norm": 0.3994309604167938,
"learning_rate": 0.0002713270142180095,
"loss": 0.3728,
"step": 3125
},
{
"epoch": 3.6885245901639343,
"grad_norm": 0.49005889892578125,
"learning_rate": 0.0002654028436018958,
"loss": 0.3888,
"step": 3150
},
{
"epoch": 3.717798594847775,
"grad_norm": 0.5987063050270081,
"learning_rate": 0.000259478672985782,
"loss": 0.3343,
"step": 3175
},
{
"epoch": 3.747072599531616,
"grad_norm": 0.4161163568496704,
"learning_rate": 0.00025355450236966824,
"loss": 0.3598,
"step": 3200
},
{
"epoch": 3.7763466042154565,
"grad_norm": 0.49151715636253357,
"learning_rate": 0.0002476303317535545,
"loss": 0.3479,
"step": 3225
},
{
"epoch": 3.8056206088992974,
"grad_norm": 0.48634546995162964,
"learning_rate": 0.00024170616113744077,
"loss": 0.418,
"step": 3250
},
{
"epoch": 3.8348946135831383,
"grad_norm": 0.6226612329483032,
"learning_rate": 0.000235781990521327,
"loss": 0.4501,
"step": 3275
},
{
"epoch": 3.8641686182669788,
"grad_norm": 0.5559201240539551,
"learning_rate": 0.00022985781990521325,
"loss": 0.3354,
"step": 3300
},
{
"epoch": 3.8852459016393444,
"eval_loss": 0.38408055901527405,
"eval_runtime": 10005.3823,
"eval_samples_per_second": 0.152,
"eval_steps_per_second": 0.009,
"eval_wer": 30.916382850049335,
"step": 3318
},
{
"epoch": 3.8934426229508197,
"grad_norm": 0.48179224133491516,
"learning_rate": 0.00022393364928909954,
"loss": 0.3966,
"step": 3325
},
{
"epoch": 3.9227166276346606,
"grad_norm": 0.6447755098342896,
"learning_rate": 0.0002180094786729858,
"loss": 0.4289,
"step": 3350
},
{
"epoch": 3.951990632318501,
"grad_norm": 0.5592084527015686,
"learning_rate": 0.00021208530805687204,
"loss": 0.4374,
"step": 3375
},
{
"epoch": 3.981264637002342,
"grad_norm": 0.8099021315574646,
"learning_rate": 0.0002061611374407583,
"loss": 0.4103,
"step": 3400
},
{
"epoch": 4.010538641686183,
"grad_norm": 0.4467124938964844,
"learning_rate": 0.00020023696682464458,
"loss": 0.3623,
"step": 3425
},
{
"epoch": 4.039812646370024,
"grad_norm": 0.7924293279647827,
"learning_rate": 0.0001943127962085308,
"loss": 0.4256,
"step": 3450
},
{
"epoch": 4.069086651053865,
"grad_norm": 0.5706892013549805,
"learning_rate": 0.00018838862559241706,
"loss": 0.4215,
"step": 3475
},
{
"epoch": 4.098360655737705,
"grad_norm": 0.5217132568359375,
"learning_rate": 0.00018246445497630332,
"loss": 0.3815,
"step": 3500
},
{
"epoch": 4.1276346604215455,
"grad_norm": 0.6682723164558411,
"learning_rate": 0.00017654028436018957,
"loss": 0.4053,
"step": 3525
},
{
"epoch": 4.156908665105386,
"grad_norm": 0.34864214062690735,
"learning_rate": 0.00017061611374407585,
"loss": 0.341,
"step": 3550
},
{
"epoch": 4.162763466042154,
"eval_loss": 0.3835831880569458,
"eval_runtime": 9981.995,
"eval_samples_per_second": 0.152,
"eval_steps_per_second": 0.01,
"eval_wer": 31.677117484164626,
"step": 3555
},
{
"epoch": 4.186182669789227,
"grad_norm": 0.44574442505836487,
"learning_rate": 0.0001646919431279621,
"loss": 0.3327,
"step": 3575
},
{
"epoch": 4.215456674473068,
"grad_norm": 0.3936574459075928,
"learning_rate": 0.00015876777251184833,
"loss": 0.3432,
"step": 3600
},
{
"epoch": 4.244730679156909,
"grad_norm": 0.4089759290218353,
"learning_rate": 0.0001528436018957346,
"loss": 0.4012,
"step": 3625
},
{
"epoch": 4.274004683840749,
"grad_norm": 0.2764008641242981,
"learning_rate": 0.00014691943127962084,
"loss": 0.3951,
"step": 3650
},
{
"epoch": 4.30327868852459,
"grad_norm": 0.5552195310592651,
"learning_rate": 0.00014099526066350712,
"loss": 0.3913,
"step": 3675
},
{
"epoch": 4.332552693208431,
"grad_norm": 0.5924739241600037,
"learning_rate": 0.00013507109004739338,
"loss": 0.4114,
"step": 3700
},
{
"epoch": 4.361826697892272,
"grad_norm": 0.40245234966278076,
"learning_rate": 0.00012914691943127963,
"loss": 0.4296,
"step": 3725
},
{
"epoch": 4.391100702576113,
"grad_norm": 0.47809380292892456,
"learning_rate": 0.0001232227488151659,
"loss": 0.3637,
"step": 3750
},
{
"epoch": 4.4203747072599535,
"grad_norm": 0.5671224594116211,
"learning_rate": 0.00011729857819905214,
"loss": 0.3418,
"step": 3775
},
{
"epoch": 4.4402810304449645,
"eval_loss": 0.3801835775375366,
"eval_runtime": 9999.3716,
"eval_samples_per_second": 0.152,
"eval_steps_per_second": 0.01,
"eval_wer": 30.77792278066015,
"step": 3792
}
],
"logging_steps": 25,
"max_steps": 4270,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 237,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.274664582086656e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}