{
"best_global_step": 2000,
"best_metric": 32.74580361567799,
"best_model_checkpoint": "./HAUSA_B/checkpoint-2000",
"epoch": 2.706838185511171,
"eval_steps": 200,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.027081922816519974,
"grad_norm": 106.59215545654297,
"learning_rate": 8.000000000000001e-07,
"loss": 6.1915,
"step": 20
},
{
"epoch": 0.05416384563303995,
"grad_norm": 30.563518524169922,
"learning_rate": 1.8000000000000001e-06,
"loss": 4.5886,
"step": 40
},
{
"epoch": 0.08124576844955991,
"grad_norm": 16.528457641601562,
"learning_rate": 2.8000000000000003e-06,
"loss": 3.0338,
"step": 60
},
{
"epoch": 0.1083276912660799,
"grad_norm": 13.852096557617188,
"learning_rate": 3.8000000000000005e-06,
"loss": 2.3188,
"step": 80
},
{
"epoch": 0.13540961408259986,
"grad_norm": 13.284646987915039,
"learning_rate": 4.800000000000001e-06,
"loss": 1.9725,
"step": 100
},
{
"epoch": 0.16249153689911983,
"grad_norm": 13.212055206298828,
"learning_rate": 5.8e-06,
"loss": 1.7839,
"step": 120
},
{
"epoch": 0.1895734597156398,
"grad_norm": 12.006990432739258,
"learning_rate": 6.800000000000001e-06,
"loss": 1.5991,
"step": 140
},
{
"epoch": 0.2166553825321598,
"grad_norm": 12.490514755249023,
"learning_rate": 7.800000000000002e-06,
"loss": 1.4411,
"step": 160
},
{
"epoch": 0.24373730534867977,
"grad_norm": 11.587646484375,
"learning_rate": 8.8e-06,
"loss": 1.3487,
"step": 180
},
{
"epoch": 0.2708192281651997,
"grad_norm": 11.574933052062988,
"learning_rate": 9.800000000000001e-06,
"loss": 1.2119,
"step": 200
},
{
"epoch": 0.2708192281651997,
"eval_loss": 1.1157827377319336,
"eval_runtime": 1601.2991,
"eval_samples_per_second": 3.688,
"eval_steps_per_second": 0.462,
"eval_wer": 64.22461160550755,
"step": 200
},
{
"epoch": 0.2979011509817197,
"grad_norm": 8.368565559387207,
"learning_rate": 9.920556107249256e-06,
"loss": 0.9452,
"step": 220
},
{
"epoch": 0.32498307379823965,
"grad_norm": 7.2616682052612305,
"learning_rate": 9.821251241310825e-06,
"loss": 0.9298,
"step": 240
},
{
"epoch": 0.35206499661475965,
"grad_norm": 7.266394138336182,
"learning_rate": 9.721946375372395e-06,
"loss": 0.8567,
"step": 260
},
{
"epoch": 0.3791469194312796,
"grad_norm": 8.120067596435547,
"learning_rate": 9.622641509433963e-06,
"loss": 0.8251,
"step": 280
},
{
"epoch": 0.4062288422477996,
"grad_norm": 7.483547687530518,
"learning_rate": 9.523336643495532e-06,
"loss": 0.7774,
"step": 300
},
{
"epoch": 0.4333107650643196,
"grad_norm": 7.521896839141846,
"learning_rate": 9.4240317775571e-06,
"loss": 0.7266,
"step": 320
},
{
"epoch": 0.46039268788083954,
"grad_norm": 7.073266506195068,
"learning_rate": 9.32472691161867e-06,
"loss": 0.7248,
"step": 340
},
{
"epoch": 0.48747461069735953,
"grad_norm": 6.335423469543457,
"learning_rate": 9.22542204568024e-06,
"loss": 0.7151,
"step": 360
},
{
"epoch": 0.5145565335138795,
"grad_norm": 6.936922550201416,
"learning_rate": 9.126117179741808e-06,
"loss": 0.6881,
"step": 380
},
{
"epoch": 0.5416384563303994,
"grad_norm": 7.596807479858398,
"learning_rate": 9.026812313803377e-06,
"loss": 0.6995,
"step": 400
},
{
"epoch": 0.5416384563303994,
"eval_loss": 0.6352065801620483,
"eval_runtime": 1597.3635,
"eval_samples_per_second": 3.697,
"eval_steps_per_second": 0.463,
"eval_wer": 51.63653601672089,
"step": 400
},
{
"epoch": 0.5687203791469194,
"grad_norm": 6.981812477111816,
"learning_rate": 8.927507447864945e-06,
"loss": 0.6795,
"step": 420
},
{
"epoch": 0.5958023019634394,
"grad_norm": 6.481506824493408,
"learning_rate": 8.828202581926516e-06,
"loss": 0.6643,
"step": 440
},
{
"epoch": 0.6228842247799594,
"grad_norm": 6.537086009979248,
"learning_rate": 8.728897715988084e-06,
"loss": 0.6459,
"step": 460
},
{
"epoch": 0.6499661475964793,
"grad_norm": 6.739567756652832,
"learning_rate": 8.629592850049653e-06,
"loss": 0.6736,
"step": 480
},
{
"epoch": 0.6770480704129993,
"grad_norm": 7.422546863555908,
"learning_rate": 8.530287984111221e-06,
"loss": 0.6591,
"step": 500
},
{
"epoch": 0.7041299932295193,
"grad_norm": 5.7051215171813965,
"learning_rate": 8.430983118172792e-06,
"loss": 0.6442,
"step": 520
},
{
"epoch": 0.7312119160460393,
"grad_norm": 7.166143417358398,
"learning_rate": 8.33167825223436e-06,
"loss": 0.6326,
"step": 540
},
{
"epoch": 0.7582938388625592,
"grad_norm": 7.759460926055908,
"learning_rate": 8.232373386295929e-06,
"loss": 0.6328,
"step": 560
},
{
"epoch": 0.7853757616790792,
"grad_norm": 5.876537799835205,
"learning_rate": 8.133068520357497e-06,
"loss": 0.5945,
"step": 580
},
{
"epoch": 0.8124576844955992,
"grad_norm": 6.475106716156006,
"learning_rate": 8.033763654419066e-06,
"loss": 0.6038,
"step": 600
},
{
"epoch": 0.8124576844955992,
"eval_loss": 0.5318673849105835,
"eval_runtime": 1614.0202,
"eval_samples_per_second": 3.659,
"eval_steps_per_second": 0.458,
"eval_wer": 44.61419121291129,
"step": 600
},
{
"epoch": 0.8395396073121192,
"grad_norm": 6.8348259925842285,
"learning_rate": 7.934458788480636e-06,
"loss": 0.6046,
"step": 620
},
{
"epoch": 0.8666215301286392,
"grad_norm": 6.060784339904785,
"learning_rate": 7.835153922542206e-06,
"loss": 0.6055,
"step": 640
},
{
"epoch": 0.8937034529451591,
"grad_norm": 6.01262903213501,
"learning_rate": 7.735849056603775e-06,
"loss": 0.6139,
"step": 660
},
{
"epoch": 0.9207853757616791,
"grad_norm": 6.706854820251465,
"learning_rate": 7.636544190665344e-06,
"loss": 0.5789,
"step": 680
},
{
"epoch": 0.9478672985781991,
"grad_norm": 6.464681625366211,
"learning_rate": 7.537239324726913e-06,
"loss": 0.5857,
"step": 700
},
{
"epoch": 0.9749492213947191,
"grad_norm": 7.8035478591918945,
"learning_rate": 7.437934458788482e-06,
"loss": 0.5739,
"step": 720
},
{
"epoch": 1.001354096140826,
"grad_norm": 5.574532985687256,
"learning_rate": 7.33862959285005e-06,
"loss": 0.5443,
"step": 740
},
{
"epoch": 1.028436018957346,
"grad_norm": 5.275153160095215,
"learning_rate": 7.23932472691162e-06,
"loss": 0.4705,
"step": 760
},
{
"epoch": 1.055517941773866,
"grad_norm": 5.722527027130127,
"learning_rate": 7.140019860973188e-06,
"loss": 0.4692,
"step": 780
},
{
"epoch": 1.0825998645903858,
"grad_norm": 5.501762866973877,
"learning_rate": 7.040714995034758e-06,
"loss": 0.4791,
"step": 800
},
{
"epoch": 1.0825998645903858,
"eval_loss": 0.46895861625671387,
"eval_runtime": 1599.6376,
"eval_samples_per_second": 3.691,
"eval_steps_per_second": 0.462,
"eval_wer": 40.9726561658299,
"step": 800
},
{
"epoch": 1.1096817874069058,
"grad_norm": 5.207645893096924,
"learning_rate": 6.941410129096326e-06,
"loss": 0.4569,
"step": 820
},
{
"epoch": 1.1367637102234258,
"grad_norm": 5.584475517272949,
"learning_rate": 6.842105263157896e-06,
"loss": 0.4262,
"step": 840
},
{
"epoch": 1.1638456330399458,
"grad_norm": 5.881894588470459,
"learning_rate": 6.742800397219464e-06,
"loss": 0.4533,
"step": 860
},
{
"epoch": 1.1909275558564658,
"grad_norm": 6.939334869384766,
"learning_rate": 6.643495531281034e-06,
"loss": 0.4675,
"step": 880
},
{
"epoch": 1.2180094786729858,
"grad_norm": 5.780360698699951,
"learning_rate": 6.544190665342602e-06,
"loss": 0.4238,
"step": 900
},
{
"epoch": 1.2450914014895058,
"grad_norm": 5.867160797119141,
"learning_rate": 6.444885799404172e-06,
"loss": 0.444,
"step": 920
},
{
"epoch": 1.2721733243060258,
"grad_norm": 6.121824741363525,
"learning_rate": 6.34558093346574e-06,
"loss": 0.4333,
"step": 940
},
{
"epoch": 1.2992552471225456,
"grad_norm": 5.132157802581787,
"learning_rate": 6.24627606752731e-06,
"loss": 0.4544,
"step": 960
},
{
"epoch": 1.3263371699390656,
"grad_norm": 6.384315013885498,
"learning_rate": 6.146971201588878e-06,
"loss": 0.4332,
"step": 980
},
{
"epoch": 1.3534190927555856,
"grad_norm": 4.54695987701416,
"learning_rate": 6.047666335650447e-06,
"loss": 0.4416,
"step": 1000
},
{
"epoch": 1.3534190927555856,
"eval_loss": 0.43043428659439087,
"eval_runtime": 1614.3899,
"eval_samples_per_second": 3.658,
"eval_steps_per_second": 0.458,
"eval_wer": 38.00771402098731,
"step": 1000
},
{
"epoch": 1.3805010155721056,
"grad_norm": 4.973900318145752,
"learning_rate": 5.948361469712016e-06,
"loss": 0.4401,
"step": 1020
},
{
"epoch": 1.4075829383886256,
"grad_norm": 5.952788352966309,
"learning_rate": 5.849056603773585e-06,
"loss": 0.4865,
"step": 1040
},
{
"epoch": 1.4346648612051456,
"grad_norm": 6.601942539215088,
"learning_rate": 5.749751737835154e-06,
"loss": 0.4435,
"step": 1060
},
{
"epoch": 1.4617467840216656,
"grad_norm": 6.17143440246582,
"learning_rate": 5.650446871896723e-06,
"loss": 0.451,
"step": 1080
},
{
"epoch": 1.4888287068381856,
"grad_norm": 5.782886981964111,
"learning_rate": 5.551142005958292e-06,
"loss": 0.4311,
"step": 1100
},
{
"epoch": 1.5159106296547056,
"grad_norm": 5.734127998352051,
"learning_rate": 5.451837140019861e-06,
"loss": 0.4632,
"step": 1120
},
{
"epoch": 1.5429925524712256,
"grad_norm": 5.601761341094971,
"learning_rate": 5.35253227408143e-06,
"loss": 0.424,
"step": 1140
},
{
"epoch": 1.5700744752877456,
"grad_norm": 5.866110801696777,
"learning_rate": 5.253227408142999e-06,
"loss": 0.4273,
"step": 1160
},
{
"epoch": 1.5971563981042654,
"grad_norm": 5.120361328125,
"learning_rate": 5.153922542204568e-06,
"loss": 0.4297,
"step": 1180
},
{
"epoch": 1.6242383209207854,
"grad_norm": 5.093082427978516,
"learning_rate": 5.054617676266137e-06,
"loss": 0.4321,
"step": 1200
},
{
"epoch": 1.6242383209207854,
"eval_loss": 0.3993258476257324,
"eval_runtime": 1619.3754,
"eval_samples_per_second": 3.646,
"eval_steps_per_second": 0.456,
"eval_wer": 37.091943373052636,
"step": 1200
},
{
"epoch": 1.6513202437373053,
"grad_norm": 5.076949119567871,
"learning_rate": 4.955312810327706e-06,
"loss": 0.4446,
"step": 1220
},
{
"epoch": 1.6784021665538253,
"grad_norm": 5.729442596435547,
"learning_rate": 4.856007944389276e-06,
"loss": 0.4375,
"step": 1240
},
{
"epoch": 1.7054840893703453,
"grad_norm": 5.660022258758545,
"learning_rate": 4.756703078450844e-06,
"loss": 0.4318,
"step": 1260
},
{
"epoch": 1.7325660121868651,
"grad_norm": 5.320847511291504,
"learning_rate": 4.657398212512414e-06,
"loss": 0.4255,
"step": 1280
},
{
"epoch": 1.7596479350033851,
"grad_norm": 4.652419567108154,
"learning_rate": 4.558093346573982e-06,
"loss": 0.4231,
"step": 1300
},
{
"epoch": 1.7867298578199051,
"grad_norm": 6.301181793212891,
"learning_rate": 4.458788480635551e-06,
"loss": 0.4426,
"step": 1320
},
{
"epoch": 1.8138117806364251,
"grad_norm": 5.540495872497559,
"learning_rate": 4.35948361469712e-06,
"loss": 0.4361,
"step": 1340
},
{
"epoch": 1.8408937034529451,
"grad_norm": 5.618797302246094,
"learning_rate": 4.260178748758689e-06,
"loss": 0.4081,
"step": 1360
},
{
"epoch": 1.8679756262694651,
"grad_norm": 5.519278526306152,
"learning_rate": 4.160873882820258e-06,
"loss": 0.4311,
"step": 1380
},
{
"epoch": 1.8950575490859851,
"grad_norm": 5.24718713760376,
"learning_rate": 4.061569016881828e-06,
"loss": 0.441,
"step": 1400
},
{
"epoch": 1.8950575490859851,
"eval_loss": 0.37384942173957825,
"eval_runtime": 1616.0813,
"eval_samples_per_second": 3.654,
"eval_steps_per_second": 0.457,
"eval_wer": 35.20006895214291,
"step": 1400
},
{
"epoch": 1.9221394719025051,
"grad_norm": 5.9886884689331055,
"learning_rate": 3.962264150943396e-06,
"loss": 0.4125,
"step": 1420
},
{
"epoch": 1.9492213947190251,
"grad_norm": 5.233187198638916,
"learning_rate": 3.862959285004966e-06,
"loss": 0.4179,
"step": 1440
},
{
"epoch": 1.9763033175355451,
"grad_norm": 5.533965587615967,
"learning_rate": 3.763654419066535e-06,
"loss": 0.4272,
"step": 1460
},
{
"epoch": 2.002708192281652,
"grad_norm": 5.179864883422852,
"learning_rate": 3.664349553128104e-06,
"loss": 0.4082,
"step": 1480
},
{
"epoch": 2.029790115098172,
"grad_norm": 4.748936653137207,
"learning_rate": 3.565044687189673e-06,
"loss": 0.3085,
"step": 1500
},
{
"epoch": 2.056872037914692,
"grad_norm": 5.267838001251221,
"learning_rate": 3.4657398212512415e-06,
"loss": 0.312,
"step": 1520
},
{
"epoch": 2.083953960731212,
"grad_norm": 6.315608501434326,
"learning_rate": 3.3664349553128105e-06,
"loss": 0.3512,
"step": 1540
},
{
"epoch": 2.111035883547732,
"grad_norm": 5.232484340667725,
"learning_rate": 3.2671300893743795e-06,
"loss": 0.3184,
"step": 1560
},
{
"epoch": 2.138117806364252,
"grad_norm": 4.316751956939697,
"learning_rate": 3.1678252234359485e-06,
"loss": 0.3129,
"step": 1580
},
{
"epoch": 2.1651997291807716,
"grad_norm": 4.040640354156494,
"learning_rate": 3.0685203574975175e-06,
"loss": 0.3073,
"step": 1600
},
{
"epoch": 2.1651997291807716,
"eval_loss": 0.3516213595867157,
"eval_runtime": 1617.0984,
"eval_samples_per_second": 3.652,
"eval_steps_per_second": 0.457,
"eval_wer": 34.23258419703075,
"step": 1600
},
{
"epoch": 2.1922816519972916,
"grad_norm": 4.437410831451416,
"learning_rate": 2.9692154915590865e-06,
"loss": 0.3293,
"step": 1620
},
{
"epoch": 2.2193635748138116,
"grad_norm": 4.284883499145508,
"learning_rate": 2.8699106256206555e-06,
"loss": 0.3237,
"step": 1640
},
{
"epoch": 2.2464454976303316,
"grad_norm": 4.959640979766846,
"learning_rate": 2.7706057596822245e-06,
"loss": 0.3374,
"step": 1660
},
{
"epoch": 2.2735274204468516,
"grad_norm": 5.366809368133545,
"learning_rate": 2.6713008937437935e-06,
"loss": 0.3086,
"step": 1680
},
{
"epoch": 2.3006093432633716,
"grad_norm": 4.363138198852539,
"learning_rate": 2.571996027805363e-06,
"loss": 0.331,
"step": 1700
},
{
"epoch": 2.3276912660798916,
"grad_norm": 4.7536468505859375,
"learning_rate": 2.4726911618669315e-06,
"loss": 0.317,
"step": 1720
},
{
"epoch": 2.3547731888964116,
"grad_norm": 4.352657794952393,
"learning_rate": 2.3733862959285005e-06,
"loss": 0.3008,
"step": 1740
},
{
"epoch": 2.3818551117129316,
"grad_norm": 4.978397846221924,
"learning_rate": 2.2740814299900695e-06,
"loss": 0.3245,
"step": 1760
},
{
"epoch": 2.4089370345294516,
"grad_norm": 4.570504665374756,
"learning_rate": 2.174776564051639e-06,
"loss": 0.3147,
"step": 1780
},
{
"epoch": 2.4360189573459716,
"grad_norm": 5.904801845550537,
"learning_rate": 2.075471698113208e-06,
"loss": 0.3395,
"step": 1800
},
{
"epoch": 2.4360189573459716,
"eval_loss": 0.3366335332393646,
"eval_runtime": 1614.6836,
"eval_samples_per_second": 3.657,
"eval_steps_per_second": 0.458,
"eval_wer": 33.028076450688445,
"step": 1800
},
{
"epoch": 2.4631008801624916,
"grad_norm": 5.771766185760498,
"learning_rate": 1.976166832174777e-06,
"loss": 0.3064,
"step": 1820
},
{
"epoch": 2.4901828029790116,
"grad_norm": 5.262455463409424,
"learning_rate": 1.8768619662363458e-06,
"loss": 0.2961,
"step": 1840
},
{
"epoch": 2.5172647257955316,
"grad_norm": 5.288928031921387,
"learning_rate": 1.7775571002979148e-06,
"loss": 0.3285,
"step": 1860
},
{
"epoch": 2.5443466486120516,
"grad_norm": 5.816193580627441,
"learning_rate": 1.6782522343594838e-06,
"loss": 0.3254,
"step": 1880
},
{
"epoch": 2.571428571428571,
"grad_norm": 5.249015808105469,
"learning_rate": 1.5789473684210526e-06,
"loss": 0.3279,
"step": 1900
},
{
"epoch": 2.598510494245091,
"grad_norm": 4.509056568145752,
"learning_rate": 1.4796425024826216e-06,
"loss": 0.317,
"step": 1920
},
{
"epoch": 2.625592417061611,
"grad_norm": 4.113898754119873,
"learning_rate": 1.3803376365441908e-06,
"loss": 0.3135,
"step": 1940
},
{
"epoch": 2.652674339878131,
"grad_norm": 5.507041931152344,
"learning_rate": 1.2810327706057598e-06,
"loss": 0.3302,
"step": 1960
},
{
"epoch": 2.679756262694651,
"grad_norm": 4.8485846519470215,
"learning_rate": 1.1817279046673288e-06,
"loss": 0.3106,
"step": 1980
},
{
"epoch": 2.706838185511171,
"grad_norm": 5.389250755310059,
"learning_rate": 1.0824230387288978e-06,
"loss": 0.3347,
"step": 2000
},
{
"epoch": 2.706838185511171,
"eval_loss": 0.3267146348953247,
"eval_runtime": 1650.7646,
"eval_samples_per_second": 3.577,
"eval_steps_per_second": 0.448,
"eval_wer": 32.74580361567799,
"step": 2000
}
],
"logging_steps": 20,
"max_steps": 2214,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.845330483806208e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}