{
"best_global_step": 2500,
"best_metric": 0.999875,
"best_model_checkpoint": "./output/checkpoint-2500",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0004,
"grad_norm": 1.8289061784744263,
"learning_rate": 0.0,
"loss": 0.7116,
"step": 1
},
{
"epoch": 0.004,
"grad_norm": 1.8662855625152588,
"learning_rate": 1.44e-05,
"loss": 0.6501,
"step": 10
},
{
"epoch": 0.008,
"grad_norm": 0.9602453112602234,
"learning_rate": 3.04e-05,
"loss": 0.2983,
"step": 20
},
{
"epoch": 0.012,
"grad_norm": 0.017806751653552055,
"learning_rate": 4.64e-05,
"loss": 0.0069,
"step": 30
},
{
"epoch": 0.016,
"grad_norm": 0.005567730404436588,
"learning_rate": 6.24e-05,
"loss": 0.0056,
"step": 40
},
{
"epoch": 0.02,
"grad_norm": 0.0028145266696810722,
"learning_rate": 7.840000000000001e-05,
"loss": 0.0083,
"step": 50
},
{
"epoch": 0.024,
"grad_norm": 0.002064670203253627,
"learning_rate": 9.44e-05,
"loss": 0.0001,
"step": 60
},
{
"epoch": 0.028,
"grad_norm": 0.0016958912601694465,
"learning_rate": 0.00011040000000000001,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.032,
"grad_norm": 0.0036686942912638187,
"learning_rate": 0.0001264,
"loss": 0.0447,
"step": 80
},
{
"epoch": 0.036,
"grad_norm": 8.377481460571289,
"learning_rate": 0.0001424,
"loss": 0.0066,
"step": 90
},
{
"epoch": 0.04,
"grad_norm": 8.64479923248291,
"learning_rate": 0.00015840000000000003,
"loss": 0.0222,
"step": 100
},
{
"epoch": 0.044,
"grad_norm": 0.01056673564016819,
"learning_rate": 0.0001744,
"loss": 0.0128,
"step": 110
},
{
"epoch": 0.048,
"grad_norm": 0.007756778970360756,
"learning_rate": 0.0001904,
"loss": 0.0306,
"step": 120
},
{
"epoch": 0.052,
"grad_norm": 0.027605902403593063,
"learning_rate": 0.0002064,
"loss": 0.0185,
"step": 130
},
{
"epoch": 0.056,
"grad_norm": 0.0016576085472479463,
"learning_rate": 0.00022240000000000004,
"loss": 0.0001,
"step": 140
},
{
"epoch": 0.06,
"grad_norm": 16.3161678314209,
"learning_rate": 0.0002384,
"loss": 0.0472,
"step": 150
},
{
"epoch": 0.064,
"grad_norm": 0.385207861661911,
"learning_rate": 0.0002544,
"loss": 0.0789,
"step": 160
},
{
"epoch": 0.068,
"grad_norm": 1.33426833152771,
"learning_rate": 0.0002704,
"loss": 0.0577,
"step": 170
},
{
"epoch": 0.072,
"grad_norm": 0.7534772753715515,
"learning_rate": 0.0002864,
"loss": 0.0337,
"step": 180
},
{
"epoch": 0.076,
"grad_norm": 0.4073263108730316,
"learning_rate": 0.00030240000000000003,
"loss": 0.0354,
"step": 190
},
{
"epoch": 0.08,
"grad_norm": 0.5311892628669739,
"learning_rate": 0.00031840000000000004,
"loss": 0.0886,
"step": 200
},
{
"epoch": 0.084,
"grad_norm": 0.04398029297590256,
"learning_rate": 0.0003344,
"loss": 0.0649,
"step": 210
},
{
"epoch": 0.088,
"grad_norm": 0.02103251777589321,
"learning_rate": 0.0003504,
"loss": 0.0122,
"step": 220
},
{
"epoch": 0.092,
"grad_norm": 0.021729949861764908,
"learning_rate": 0.0003664,
"loss": 0.0007,
"step": 230
},
{
"epoch": 0.096,
"grad_norm": 10.518205642700195,
"learning_rate": 0.0003824,
"loss": 0.0542,
"step": 240
},
{
"epoch": 0.1,
"grad_norm": 0.2691439688205719,
"learning_rate": 0.00039840000000000003,
"loss": 0.1148,
"step": 250
},
{
"epoch": 0.104,
"grad_norm": 0.006621910724788904,
"learning_rate": 0.00039840000000000003,
"loss": 0.0033,
"step": 260
},
{
"epoch": 0.108,
"grad_norm": 0.006356918718665838,
"learning_rate": 0.00039662222222222224,
"loss": 0.0142,
"step": 270
},
{
"epoch": 0.112,
"grad_norm": 0.19780497252941132,
"learning_rate": 0.0003948444444444445,
"loss": 0.0532,
"step": 280
},
{
"epoch": 0.116,
"grad_norm": 0.022000627592206,
"learning_rate": 0.0003930666666666667,
"loss": 0.023,
"step": 290
},
{
"epoch": 0.12,
"grad_norm": 0.021432537585496902,
"learning_rate": 0.0003912888888888889,
"loss": 0.0132,
"step": 300
},
{
"epoch": 0.124,
"grad_norm": 0.45686644315719604,
"learning_rate": 0.00038951111111111113,
"loss": 0.0491,
"step": 310
},
{
"epoch": 0.128,
"grad_norm": 1.6421903371810913,
"learning_rate": 0.0003877333333333334,
"loss": 0.034,
"step": 320
},
{
"epoch": 0.132,
"grad_norm": 0.10952561348676682,
"learning_rate": 0.0003859555555555556,
"loss": 0.0945,
"step": 330
},
{
"epoch": 0.136,
"grad_norm": 0.05203436315059662,
"learning_rate": 0.0003841777777777778,
"loss": 0.01,
"step": 340
},
{
"epoch": 0.14,
"grad_norm": 0.06594817340373993,
"learning_rate": 0.0003824,
"loss": 0.0229,
"step": 350
},
{
"epoch": 0.144,
"grad_norm": 0.03412980958819389,
"learning_rate": 0.00038062222222222223,
"loss": 0.0086,
"step": 360
},
{
"epoch": 0.148,
"grad_norm": 0.022151971235871315,
"learning_rate": 0.00037884444444444444,
"loss": 0.0098,
"step": 370
},
{
"epoch": 0.152,
"grad_norm": 4.759343147277832,
"learning_rate": 0.00037706666666666665,
"loss": 0.0014,
"step": 380
},
{
"epoch": 0.156,
"grad_norm": 0.031497757881879807,
"learning_rate": 0.0003752888888888889,
"loss": 0.0455,
"step": 390
},
{
"epoch": 0.16,
"grad_norm": 0.11943238973617554,
"learning_rate": 0.0003735111111111111,
"loss": 0.017,
"step": 400
},
{
"epoch": 0.164,
"grad_norm": 0.020562870427966118,
"learning_rate": 0.00037173333333333333,
"loss": 0.0036,
"step": 410
},
{
"epoch": 0.168,
"grad_norm": 0.00521384971216321,
"learning_rate": 0.00036995555555555554,
"loss": 0.0004,
"step": 420
},
{
"epoch": 0.172,
"grad_norm": 2.1800899505615234,
"learning_rate": 0.00036817777777777775,
"loss": 0.0337,
"step": 430
},
{
"epoch": 0.176,
"grad_norm": 0.14933565258979797,
"learning_rate": 0.0003664,
"loss": 0.02,
"step": 440
},
{
"epoch": 0.18,
"grad_norm": 1.230485439300537,
"learning_rate": 0.0003646222222222222,
"loss": 0.0351,
"step": 450
},
{
"epoch": 0.184,
"grad_norm": 0.012220942415297031,
"learning_rate": 0.00036284444444444443,
"loss": 0.0026,
"step": 460
},
{
"epoch": 0.188,
"grad_norm": 0.00523759750649333,
"learning_rate": 0.00036106666666666664,
"loss": 0.0002,
"step": 470
},
{
"epoch": 0.192,
"grad_norm": 0.19926713407039642,
"learning_rate": 0.0003592888888888889,
"loss": 0.0263,
"step": 480
},
{
"epoch": 0.196,
"grad_norm": 0.0035884054377675056,
"learning_rate": 0.0003575111111111111,
"loss": 0.0267,
"step": 490
},
{
"epoch": 0.2,
"grad_norm": 0.30148079991340637,
"learning_rate": 0.0003557333333333333,
"loss": 0.0446,
"step": 500
},
{
"epoch": 0.204,
"grad_norm": 0.046939630061388016,
"learning_rate": 0.00035395555555555553,
"loss": 0.0084,
"step": 510
},
{
"epoch": 0.208,
"grad_norm": 0.02836071327328682,
"learning_rate": 0.0003521777777777778,
"loss": 0.0161,
"step": 520
},
{
"epoch": 0.212,
"grad_norm": 0.06878186017274857,
"learning_rate": 0.0003504,
"loss": 0.0082,
"step": 530
},
{
"epoch": 0.216,
"grad_norm": 0.14635486900806427,
"learning_rate": 0.0003486222222222222,
"loss": 0.033,
"step": 540
},
{
"epoch": 0.22,
"grad_norm": 0.02621046081185341,
"learning_rate": 0.0003468444444444445,
"loss": 0.0234,
"step": 550
},
{
"epoch": 0.224,
"grad_norm": 0.020144036039710045,
"learning_rate": 0.0003450666666666667,
"loss": 0.005,
"step": 560
},
{
"epoch": 0.228,
"grad_norm": 0.06109397113323212,
"learning_rate": 0.0003432888888888889,
"loss": 0.0126,
"step": 570
},
{
"epoch": 0.232,
"grad_norm": 0.02218439429998398,
"learning_rate": 0.0003415111111111111,
"loss": 0.0011,
"step": 580
},
{
"epoch": 0.236,
"grad_norm": 0.0069372705183923244,
"learning_rate": 0.00033973333333333337,
"loss": 0.0003,
"step": 590
},
{
"epoch": 0.24,
"grad_norm": 0.004556322004646063,
"learning_rate": 0.0003379555555555556,
"loss": 0.0001,
"step": 600
},
{
"epoch": 0.244,
"grad_norm": 0.003253826405853033,
"learning_rate": 0.0003361777777777778,
"loss": 0.0001,
"step": 610
},
{
"epoch": 0.248,
"grad_norm": 0.0028496645390987396,
"learning_rate": 0.0003344,
"loss": 0.0001,
"step": 620
},
{
"epoch": 0.252,
"grad_norm": 0.0023998478427529335,
"learning_rate": 0.00033262222222222226,
"loss": 0.0001,
"step": 630
},
{
"epoch": 0.256,
"grad_norm": 0.0020449627190828323,
"learning_rate": 0.00033084444444444447,
"loss": 0.0,
"step": 640
},
{
"epoch": 0.26,
"grad_norm": 0.13762225210666656,
"learning_rate": 0.0003290666666666667,
"loss": 0.0216,
"step": 650
},
{
"epoch": 0.264,
"grad_norm": 9.54612922668457,
"learning_rate": 0.0003272888888888889,
"loss": 0.0634,
"step": 660
},
{
"epoch": 0.268,
"grad_norm": 2.0493884086608887,
"learning_rate": 0.00032551111111111115,
"loss": 0.116,
"step": 670
},
{
"epoch": 0.272,
"grad_norm": 0.08442521095275879,
"learning_rate": 0.00032373333333333336,
"loss": 0.0113,
"step": 680
},
{
"epoch": 0.276,
"grad_norm": 0.010931574739515781,
"learning_rate": 0.00032195555555555557,
"loss": 0.0089,
"step": 690
},
{
"epoch": 0.28,
"grad_norm": 0.08980958163738251,
"learning_rate": 0.0003201777777777778,
"loss": 0.0173,
"step": 700
},
{
"epoch": 0.284,
"grad_norm": 0.0746162161231041,
"learning_rate": 0.00031840000000000004,
"loss": 0.009,
"step": 710
},
{
"epoch": 0.288,
"grad_norm": 0.022072354331612587,
"learning_rate": 0.00031662222222222225,
"loss": 0.0016,
"step": 720
},
{
"epoch": 0.292,
"grad_norm": 0.007012135349214077,
"learning_rate": 0.00031484444444444446,
"loss": 0.0084,
"step": 730
},
{
"epoch": 0.296,
"grad_norm": 0.009781530126929283,
"learning_rate": 0.00031306666666666667,
"loss": 0.0291,
"step": 740
},
{
"epoch": 0.3,
"grad_norm": 0.03511832281947136,
"learning_rate": 0.00031128888888888893,
"loss": 0.0103,
"step": 750
},
{
"epoch": 0.304,
"grad_norm": 0.03628341481089592,
"learning_rate": 0.00030951111111111114,
"loss": 0.0013,
"step": 760
},
{
"epoch": 0.308,
"grad_norm": 0.008655968122184277,
"learning_rate": 0.00030773333333333335,
"loss": 0.0005,
"step": 770
},
{
"epoch": 0.312,
"grad_norm": 0.015080983750522137,
"learning_rate": 0.00030595555555555556,
"loss": 0.0209,
"step": 780
},
{
"epoch": 0.316,
"grad_norm": 0.025626640766859055,
"learning_rate": 0.0003041777777777778,
"loss": 0.0068,
"step": 790
},
{
"epoch": 0.32,
"grad_norm": 0.02122630551457405,
"learning_rate": 0.00030240000000000003,
"loss": 0.0018,
"step": 800
},
{
"epoch": 0.324,
"grad_norm": 0.019404616206884384,
"learning_rate": 0.00030062222222222224,
"loss": 0.0088,
"step": 810
},
{
"epoch": 0.328,
"grad_norm": 0.037797220051288605,
"learning_rate": 0.00029884444444444445,
"loss": 0.0139,
"step": 820
},
{
"epoch": 0.332,
"grad_norm": 1.059258222579956,
"learning_rate": 0.0002970666666666667,
"loss": 0.0051,
"step": 830
},
{
"epoch": 0.336,
"grad_norm": 2.2363479137420654,
"learning_rate": 0.0002952888888888889,
"loss": 0.0125,
"step": 840
},
{
"epoch": 0.34,
"grad_norm": 0.0257643461227417,
"learning_rate": 0.00029351111111111113,
"loss": 0.0121,
"step": 850
},
{
"epoch": 0.344,
"grad_norm": 0.023524092510342598,
"learning_rate": 0.00029173333333333334,
"loss": 0.0011,
"step": 860
},
{
"epoch": 0.348,
"grad_norm": 0.07415185868740082,
"learning_rate": 0.0002899555555555556,
"loss": 0.0008,
"step": 870
},
{
"epoch": 0.352,
"grad_norm": 0.003202820662409067,
"learning_rate": 0.0002881777777777778,
"loss": 0.0011,
"step": 880
},
{
"epoch": 0.356,
"grad_norm": 0.4437563121318817,
"learning_rate": 0.0002864,
"loss": 0.0088,
"step": 890
},
{
"epoch": 0.36,
"grad_norm": 0.00555839529260993,
"learning_rate": 0.00028462222222222223,
"loss": 0.0107,
"step": 900
},
{
"epoch": 0.364,
"grad_norm": 0.008060933090746403,
"learning_rate": 0.00028284444444444444,
"loss": 0.002,
"step": 910
},
{
"epoch": 0.368,
"grad_norm": 0.011799403466284275,
"learning_rate": 0.0002810666666666667,
"loss": 0.0248,
"step": 920
},
{
"epoch": 0.372,
"grad_norm": 0.042705778032541275,
"learning_rate": 0.0002792888888888889,
"loss": 0.0022,
"step": 930
},
{
"epoch": 0.376,
"grad_norm": 0.005020072218030691,
"learning_rate": 0.0002775111111111111,
"loss": 0.0072,
"step": 940
},
{
"epoch": 0.38,
"grad_norm": 0.002978922799229622,
"learning_rate": 0.00027573333333333333,
"loss": 0.0154,
"step": 950
},
{
"epoch": 0.384,
"grad_norm": 3.051888942718506,
"learning_rate": 0.00027395555555555554,
"loss": 0.0065,
"step": 960
},
{
"epoch": 0.388,
"grad_norm": 0.10347535461187363,
"learning_rate": 0.00027217777777777775,
"loss": 0.0033,
"step": 970
},
{
"epoch": 0.392,
"grad_norm": 0.008343281224370003,
"learning_rate": 0.0002704,
"loss": 0.0003,
"step": 980
},
{
"epoch": 0.396,
"grad_norm": 0.0071385628543794155,
"learning_rate": 0.0002686222222222222,
"loss": 0.0003,
"step": 990
},
{
"epoch": 0.4,
"grad_norm": 0.003130185417830944,
"learning_rate": 0.00026684444444444443,
"loss": 0.0062,
"step": 1000
},
{
"epoch": 0.404,
"grad_norm": 0.001496330020017922,
"learning_rate": 0.00026506666666666664,
"loss": 0.0005,
"step": 1010
},
{
"epoch": 0.408,
"grad_norm": 0.8067265748977661,
"learning_rate": 0.0002632888888888889,
"loss": 0.0147,
"step": 1020
},
{
"epoch": 0.412,
"grad_norm": 0.0030374648049473763,
"learning_rate": 0.0002615111111111111,
"loss": 0.0,
"step": 1030
},
{
"epoch": 0.416,
"grad_norm": 0.005266970954835415,
"learning_rate": 0.0002597333333333333,
"loss": 0.0018,
"step": 1040
},
{
"epoch": 0.42,
"grad_norm": 0.015546767972409725,
"learning_rate": 0.00025795555555555553,
"loss": 0.0078,
"step": 1050
},
{
"epoch": 0.424,
"grad_norm": 0.009915663860738277,
"learning_rate": 0.0002561777777777778,
"loss": 0.0021,
"step": 1060
},
{
"epoch": 0.428,
"grad_norm": 0.027595413848757744,
"learning_rate": 0.0002544,
"loss": 0.0069,
"step": 1070
},
{
"epoch": 0.432,
"grad_norm": 0.033019836992025375,
"learning_rate": 0.0002526222222222222,
"loss": 0.0231,
"step": 1080
},
{
"epoch": 0.436,
"grad_norm": 0.06228971853852272,
"learning_rate": 0.0002508444444444444,
"loss": 0.0162,
"step": 1090
},
{
"epoch": 0.44,
"grad_norm": 0.02616875059902668,
"learning_rate": 0.0002490666666666667,
"loss": 0.0029,
"step": 1100
},
{
"epoch": 0.444,
"grad_norm": 0.01255160290747881,
"learning_rate": 0.0002472888888888889,
"loss": 0.0014,
"step": 1110
},
{
"epoch": 0.448,
"grad_norm": 0.007698772940784693,
"learning_rate": 0.0002455111111111111,
"loss": 0.0003,
"step": 1120
},
{
"epoch": 0.452,
"grad_norm": 0.00799358356744051,
"learning_rate": 0.0002437333333333333,
"loss": 0.0003,
"step": 1130
},
{
"epoch": 0.456,
"grad_norm": 0.09039674699306488,
"learning_rate": 0.00024195555555555558,
"loss": 0.0003,
"step": 1140
},
{
"epoch": 0.46,
"grad_norm": 0.0030508041381835938,
"learning_rate": 0.00024017777777777779,
"loss": 0.0004,
"step": 1150
},
{
"epoch": 0.464,
"grad_norm": 0.0018616029992699623,
"learning_rate": 0.0002384,
"loss": 0.0053,
"step": 1160
},
{
"epoch": 0.468,
"grad_norm": 0.0019433458801358938,
"learning_rate": 0.0002366222222222222,
"loss": 0.0,
"step": 1170
},
{
"epoch": 0.472,
"grad_norm": 0.0020802582148462534,
"learning_rate": 0.00023484444444444447,
"loss": 0.0018,
"step": 1180
},
{
"epoch": 0.476,
"grad_norm": 0.2650572657585144,
"learning_rate": 0.00023306666666666668,
"loss": 0.0005,
"step": 1190
},
{
"epoch": 0.48,
"grad_norm": 0.0014364586677402258,
"learning_rate": 0.00023128888888888889,
"loss": 0.0114,
"step": 1200
},
{
"epoch": 0.484,
"grad_norm": 0.0016622517723590136,
"learning_rate": 0.00022951111111111115,
"loss": 0.0032,
"step": 1210
},
{
"epoch": 0.488,
"grad_norm": 0.0018463089363649487,
"learning_rate": 0.00022773333333333336,
"loss": 0.0001,
"step": 1220
},
{
"epoch": 0.492,
"grad_norm": 0.0017726500518620014,
"learning_rate": 0.00022595555555555557,
"loss": 0.0,
"step": 1230
},
{
"epoch": 0.496,
"grad_norm": 0.0016171345487236977,
"learning_rate": 0.00022417777777777778,
"loss": 0.0,
"step": 1240
},
{
"epoch": 0.5,
"grad_norm": 0.001747308298945427,
"learning_rate": 0.00022240000000000004,
"loss": 0.0,
"step": 1250
},
{
"epoch": 0.504,
"grad_norm": 0.01567615382373333,
"learning_rate": 0.00022062222222222225,
"loss": 0.0,
"step": 1260
},
{
"epoch": 0.508,
"grad_norm": 0.03938116133213043,
"learning_rate": 0.00021884444444444446,
"loss": 0.0001,
"step": 1270
},
{
"epoch": 0.512,
"grad_norm": 0.001731475000269711,
"learning_rate": 0.00021706666666666667,
"loss": 0.0,
"step": 1280
},
{
"epoch": 0.516,
"grad_norm": 0.0011080361437052488,
"learning_rate": 0.00021528888888888893,
"loss": 0.0,
"step": 1290
},
{
"epoch": 0.52,
"grad_norm": 0.001253710244782269,
"learning_rate": 0.00021351111111111114,
"loss": 0.0,
"step": 1300
},
{
"epoch": 0.524,
"grad_norm": 0.0009362171404063702,
"learning_rate": 0.00021173333333333335,
"loss": 0.0002,
"step": 1310
},
{
"epoch": 0.528,
"grad_norm": 0.0010680261766538024,
"learning_rate": 0.00020995555555555556,
"loss": 0.0,
"step": 1320
},
{
"epoch": 0.532,
"grad_norm": 0.0009266381966881454,
"learning_rate": 0.0002081777777777778,
"loss": 0.0,
"step": 1330
},
{
"epoch": 0.536,
"grad_norm": 0.0008506786543875933,
"learning_rate": 0.0002064,
"loss": 0.0,
"step": 1340
},
{
"epoch": 0.54,
"grad_norm": 0.000809083750937134,
"learning_rate": 0.00020462222222222224,
"loss": 0.0,
"step": 1350
},
{
"epoch": 0.544,
"grad_norm": 0.0008981467690318823,
"learning_rate": 0.00020284444444444445,
"loss": 0.0,
"step": 1360
},
{
"epoch": 0.548,
"grad_norm": 0.006928270682692528,
"learning_rate": 0.00020106666666666668,
"loss": 0.0157,
"step": 1370
},
{
"epoch": 0.552,
"grad_norm": 0.019026894122362137,
"learning_rate": 0.0001992888888888889,
"loss": 0.0003,
"step": 1380
},
{
"epoch": 0.556,
"grad_norm": 0.018439382314682007,
"learning_rate": 0.0001975111111111111,
"loss": 0.0019,
"step": 1390
},
{
"epoch": 0.56,
"grad_norm": 0.00941744539886713,
"learning_rate": 0.00019573333333333334,
"loss": 0.0007,
"step": 1400
},
{
"epoch": 0.564,
"grad_norm": 0.00815660785883665,
"learning_rate": 0.00019395555555555555,
"loss": 0.0002,
"step": 1410
},
{
"epoch": 0.568,
"grad_norm": 0.039481475949287415,
"learning_rate": 0.00019217777777777778,
"loss": 0.0096,
"step": 1420
},
{
"epoch": 0.572,
"grad_norm": 0.005729417782276869,
"learning_rate": 0.0001904,
"loss": 0.0112,
"step": 1430
},
{
"epoch": 0.576,
"grad_norm": 0.007418328896164894,
"learning_rate": 0.00018862222222222223,
"loss": 0.0077,
"step": 1440
},
{
"epoch": 0.58,
"grad_norm": 0.012174281291663647,
"learning_rate": 0.00018684444444444444,
"loss": 0.0101,
"step": 1450
},
{
"epoch": 0.584,
"grad_norm": 0.015086760744452477,
"learning_rate": 0.00018506666666666667,
"loss": 0.0003,
"step": 1460
},
{
"epoch": 0.588,
"grad_norm": 0.01587488315999508,
"learning_rate": 0.00018328888888888888,
"loss": 0.0004,
"step": 1470
},
{
"epoch": 0.592,
"grad_norm": 0.03278527408838272,
"learning_rate": 0.00018151111111111112,
"loss": 0.014,
"step": 1480
},
{
"epoch": 0.596,
"grad_norm": 0.10622735321521759,
"learning_rate": 0.00017973333333333333,
"loss": 0.0012,
"step": 1490
},
{
"epoch": 0.6,
"grad_norm": 0.014216107316315174,
"learning_rate": 0.00017795555555555557,
"loss": 0.0006,
"step": 1500
},
{
"epoch": 0.604,
"grad_norm": 0.008536259643733501,
"learning_rate": 0.0001761777777777778,
"loss": 0.0004,
"step": 1510
},
{
"epoch": 0.608,
"grad_norm": 0.005456692539155483,
"learning_rate": 0.0001744,
"loss": 0.003,
"step": 1520
},
{
"epoch": 0.612,
"grad_norm": 0.005484251771122217,
"learning_rate": 0.00017262222222222225,
"loss": 0.0002,
"step": 1530
},
{
"epoch": 0.616,
"grad_norm": 0.006969142239540815,
"learning_rate": 0.00017084444444444446,
"loss": 0.0152,
"step": 1540
},
{
"epoch": 0.62,
"grad_norm": 0.01335048582404852,
"learning_rate": 0.0001690666666666667,
"loss": 0.0003,
"step": 1550
},
{
"epoch": 0.624,
"grad_norm": 0.012201100587844849,
"learning_rate": 0.0001672888888888889,
"loss": 0.0026,
"step": 1560
},
{
"epoch": 0.628,
"grad_norm": 0.01430495735257864,
"learning_rate": 0.00016551111111111114,
"loss": 0.0006,
"step": 1570
},
{
"epoch": 0.632,
"grad_norm": 0.03964800387620926,
"learning_rate": 0.00016373333333333335,
"loss": 0.0052,
"step": 1580
},
{
"epoch": 0.636,
"grad_norm": 0.007560222875326872,
"learning_rate": 0.00016195555555555558,
"loss": 0.0002,
"step": 1590
},
{
"epoch": 0.64,
"grad_norm": 0.00928121991455555,
"learning_rate": 0.0001601777777777778,
"loss": 0.0002,
"step": 1600
},
{
"epoch": 0.644,
"grad_norm": 0.016387827694416046,
"learning_rate": 0.00015840000000000003,
"loss": 0.0002,
"step": 1610
},
{
"epoch": 0.648,
"grad_norm": 0.005653001833707094,
"learning_rate": 0.00015662222222222224,
"loss": 0.0002,
"step": 1620
},
{
"epoch": 0.652,
"grad_norm": 0.005010788328945637,
"learning_rate": 0.00015484444444444445,
"loss": 0.0026,
"step": 1630
},
{
"epoch": 0.656,
"grad_norm": 0.00448651984333992,
"learning_rate": 0.00015306666666666666,
"loss": 0.0001,
"step": 1640
},
{
"epoch": 0.66,
"grad_norm": 0.011779513210058212,
"learning_rate": 0.0001512888888888889,
"loss": 0.0001,
"step": 1650
},
{
"epoch": 0.664,
"grad_norm": 0.005999232176691294,
"learning_rate": 0.0001495111111111111,
"loss": 0.0026,
"step": 1660
},
{
"epoch": 0.668,
"grad_norm": 0.005314210895448923,
"learning_rate": 0.00014773333333333334,
"loss": 0.0001,
"step": 1670
},
{
"epoch": 0.672,
"grad_norm": 0.00803409144282341,
"learning_rate": 0.00014595555555555555,
"loss": 0.0008,
"step": 1680
},
{
"epoch": 0.676,
"grad_norm": 1.069173812866211,
"learning_rate": 0.00014417777777777778,
"loss": 0.0009,
"step": 1690
},
{
"epoch": 0.68,
"grad_norm": 0.013021737337112427,
"learning_rate": 0.0001424,
"loss": 0.0002,
"step": 1700
},
{
"epoch": 0.684,
"grad_norm": 0.0024978595320135355,
"learning_rate": 0.00014062222222222223,
"loss": 0.0089,
"step": 1710
},
{
"epoch": 0.688,
"grad_norm": 0.003698146902024746,
"learning_rate": 0.00013884444444444444,
"loss": 0.0138,
"step": 1720
},
{
"epoch": 0.692,
"grad_norm": 0.03162381052970886,
"learning_rate": 0.00013706666666666667,
"loss": 0.0047,
"step": 1730
},
{
"epoch": 0.696,
"grad_norm": 0.022371523082256317,
"learning_rate": 0.00013528888888888888,
"loss": 0.0021,
"step": 1740
},
{
"epoch": 0.7,
"grad_norm": 0.008335133083164692,
"learning_rate": 0.00013351111111111112,
"loss": 0.004,
"step": 1750
},
{
"epoch": 0.704,
"grad_norm": 0.01031657587736845,
"learning_rate": 0.00013173333333333333,
"loss": 0.0057,
"step": 1760
},
{
"epoch": 0.708,
"grad_norm": 0.008161673322319984,
"learning_rate": 0.00012995555555555556,
"loss": 0.0062,
"step": 1770
},
{
"epoch": 0.712,
"grad_norm": 0.007272409275174141,
"learning_rate": 0.00012817777777777777,
"loss": 0.0002,
"step": 1780
},
{
"epoch": 0.716,
"grad_norm": 0.006052941549569368,
"learning_rate": 0.0001264,
"loss": 0.0004,
"step": 1790
},
{
"epoch": 0.72,
"grad_norm": 0.00472453935071826,
"learning_rate": 0.00012462222222222222,
"loss": 0.0015,
"step": 1800
},
{
"epoch": 0.724,
"grad_norm": 0.004381476901471615,
"learning_rate": 0.00012284444444444445,
"loss": 0.0001,
"step": 1810
},
{
"epoch": 0.728,
"grad_norm": 0.004298892803490162,
"learning_rate": 0.00012106666666666666,
"loss": 0.0001,
"step": 1820
},
{
"epoch": 0.732,
"grad_norm": 0.003755762241780758,
"learning_rate": 0.0001192888888888889,
"loss": 0.0069,
"step": 1830
},
{
"epoch": 0.736,
"grad_norm": 0.002545682480558753,
"learning_rate": 0.00011751111111111112,
"loss": 0.0001,
"step": 1840
},
{
"epoch": 0.74,
"grad_norm": 0.0083426209166646,
"learning_rate": 0.00011573333333333333,
"loss": 0.0145,
"step": 1850
},
{
"epoch": 0.744,
"grad_norm": 0.03931976854801178,
"learning_rate": 0.00011395555555555557,
"loss": 0.0011,
"step": 1860
},
{
"epoch": 0.748,
"grad_norm": 0.012124504894018173,
"learning_rate": 0.00011217777777777778,
"loss": 0.0008,
"step": 1870
},
{
"epoch": 0.752,
"grad_norm": 0.00980482716113329,
"learning_rate": 0.00011040000000000001,
"loss": 0.0037,
"step": 1880
},
{
"epoch": 0.756,
"grad_norm": 2.7182815074920654,
"learning_rate": 0.00010862222222222222,
"loss": 0.0013,
"step": 1890
},
{
"epoch": 0.76,
"grad_norm": 0.00282659032382071,
"learning_rate": 0.00010684444444444446,
"loss": 0.0015,
"step": 1900
},
{
"epoch": 0.764,
"grad_norm": 0.0028170158620923758,
"learning_rate": 0.00010506666666666667,
"loss": 0.0002,
"step": 1910
},
{
"epoch": 0.768,
"grad_norm": 0.0013493013102561235,
"learning_rate": 0.0001032888888888889,
"loss": 0.0011,
"step": 1920
},
{
"epoch": 0.772,
"grad_norm": 0.0013236267259344459,
"learning_rate": 0.00010151111111111111,
"loss": 0.0113,
"step": 1930
},
{
"epoch": 0.776,
"grad_norm": 0.0017112682107836008,
"learning_rate": 9.973333333333334e-05,
"loss": 0.0,
"step": 1940
},
{
"epoch": 0.78,
"grad_norm": 0.0022746031172573566,
"learning_rate": 9.795555555555556e-05,
"loss": 0.0002,
"step": 1950
},
{
"epoch": 0.784,
"grad_norm": 0.0015560939209535718,
"learning_rate": 9.617777777777778e-05,
"loss": 0.0003,
"step": 1960
},
{
"epoch": 0.788,
"grad_norm": 0.001704063848592341,
"learning_rate": 9.44e-05,
"loss": 0.0001,
"step": 1970
},
{
"epoch": 0.792,
"grad_norm": 0.13681542873382568,
"learning_rate": 9.262222222222223e-05,
"loss": 0.0001,
"step": 1980
},
{
"epoch": 0.796,
"grad_norm": 0.0013939260970801115,
"learning_rate": 9.084444444444445e-05,
"loss": 0.0,
"step": 1990
},
{
"epoch": 0.8,
"grad_norm": 0.0016302280128002167,
"learning_rate": 8.906666666666667e-05,
"loss": 0.0,
"step": 2000
},
{
"epoch": 0.804,
"grad_norm": 0.0014311623526737094,
"learning_rate": 8.72888888888889e-05,
"loss": 0.0001,
"step": 2010
},
{
"epoch": 0.808,
"grad_norm": 0.0013674832880496979,
"learning_rate": 8.551111111111112e-05,
"loss": 0.0,
"step": 2020
},
{
"epoch": 0.812,
"grad_norm": 0.0013156217755749822,
"learning_rate": 8.373333333333334e-05,
"loss": 0.0,
"step": 2030
},
{
"epoch": 0.816,
"grad_norm": 0.7331569790840149,
"learning_rate": 8.195555555555556e-05,
"loss": 0.0131,
"step": 2040
},
{
"epoch": 0.82,
"grad_norm": 0.006600831635296345,
"learning_rate": 8.017777777777779e-05,
"loss": 0.0168,
"step": 2050
},
{
"epoch": 0.824,
"grad_norm": 0.012540962547063828,
"learning_rate": 7.840000000000001e-05,
"loss": 0.0005,
"step": 2060
},
{
"epoch": 0.828,
"grad_norm": 0.023732764646410942,
"learning_rate": 7.662222222222223e-05,
"loss": 0.0004,
"step": 2070
},
{
"epoch": 0.832,
"grad_norm": 0.01323721744120121,
"learning_rate": 7.484444444444445e-05,
"loss": 0.0009,
"step": 2080
},
{
"epoch": 0.836,
"grad_norm": 0.009665262885391712,
"learning_rate": 7.306666666666668e-05,
"loss": 0.0003,
"step": 2090
},
{
"epoch": 0.84,
"grad_norm": 0.008318779990077019,
"learning_rate": 7.12888888888889e-05,
"loss": 0.0002,
"step": 2100
},
{
"epoch": 0.844,
"grad_norm": 0.0076893046498298645,
"learning_rate": 6.951111111111112e-05,
"loss": 0.0002,
"step": 2110
},
{
"epoch": 0.848,
"grad_norm": 0.007633762434124947,
"learning_rate": 6.773333333333333e-05,
"loss": 0.0002,
"step": 2120
},
{
"epoch": 0.852,
"grad_norm": 0.0073342034593224525,
"learning_rate": 6.595555555555555e-05,
"loss": 0.0002,
"step": 2130
},
{
"epoch": 0.856,
"grad_norm": 0.005839989520609379,
"learning_rate": 6.417777777777778e-05,
"loss": 0.0001,
"step": 2140
},
{
"epoch": 0.86,
"grad_norm": 0.005278122611343861,
"learning_rate": 6.24e-05,
"loss": 0.0001,
"step": 2150
},
{
"epoch": 0.864,
"grad_norm": 0.006889206822961569,
"learning_rate": 6.062222222222222e-05,
"loss": 0.0001,
"step": 2160
},
{
"epoch": 0.868,
"grad_norm": 0.7518675923347473,
"learning_rate": 5.8844444444444444e-05,
"loss": 0.0011,
"step": 2170
},
{
"epoch": 0.872,
"grad_norm": 0.009561669081449509,
"learning_rate": 5.706666666666667e-05,
"loss": 0.0015,
"step": 2180
},
{
"epoch": 0.876,
"grad_norm": 0.003935567103326321,
"learning_rate": 5.528888888888889e-05,
"loss": 0.0013,
"step": 2190
},
{
"epoch": 0.88,
"grad_norm": 0.003710120217874646,
"learning_rate": 5.351111111111111e-05,
"loss": 0.0001,
"step": 2200
},
{
"epoch": 0.884,
"grad_norm": 0.002907069865614176,
"learning_rate": 5.1733333333333335e-05,
"loss": 0.0002,
"step": 2210
},
{
"epoch": 0.888,
"grad_norm": 0.0030114708933979273,
"learning_rate": 4.995555555555556e-05,
"loss": 0.0023,
"step": 2220
},
{
"epoch": 0.892,
"grad_norm": 0.0034878470469266176,
"learning_rate": 4.817777777777778e-05,
"loss": 0.0001,
"step": 2230
},
{
"epoch": 0.896,
"grad_norm": 0.003132345387712121,
"learning_rate": 4.64e-05,
"loss": 0.0004,
"step": 2240
},
{
"epoch": 0.9,
"grad_norm": 0.0030635548755526543,
"learning_rate": 4.4622222222222226e-05,
"loss": 0.0001,
"step": 2250
},
{
"epoch": 0.904,
"grad_norm": 0.0031918687745928764,
"learning_rate": 4.284444444444445e-05,
"loss": 0.0031,
"step": 2260
},
{
"epoch": 0.908,
"grad_norm": 0.006079728715121746,
"learning_rate": 4.106666666666667e-05,
"loss": 0.0001,
"step": 2270
},
{
"epoch": 0.912,
"grad_norm": 0.0049600861966609955,
"learning_rate": 3.9288888888888894e-05,
"loss": 0.0001,
"step": 2280
},
{
"epoch": 0.916,
"grad_norm": 0.006550566293299198,
"learning_rate": 3.7511111111111116e-05,
"loss": 0.0001,
"step": 2290
},
{
"epoch": 0.92,
"grad_norm": 0.0037377977278083563,
"learning_rate": 3.573333333333333e-05,
"loss": 0.0043,
"step": 2300
},
{
"epoch": 0.924,
"grad_norm": 0.0033655581064522266,
"learning_rate": 3.3955555555555555e-05,
"loss": 0.0001,
"step": 2310
},
{
"epoch": 0.928,
"grad_norm": 0.0035526896826922894,
"learning_rate": 3.217777777777778e-05,
"loss": 0.0001,
"step": 2320
},
{
"epoch": 0.932,
"grad_norm": 0.0034034913405776024,
"learning_rate": 3.04e-05,
"loss": 0.0001,
"step": 2330
},
{
"epoch": 0.936,
"grad_norm": 0.006957338657230139,
"learning_rate": 2.8622222222222223e-05,
"loss": 0.0156,
"step": 2340
},
{
"epoch": 0.94,
"grad_norm": 0.007531840819865465,
"learning_rate": 2.6844444444444446e-05,
"loss": 0.0001,
"step": 2350
},
{
"epoch": 0.944,
"grad_norm": 0.00489840330556035,
"learning_rate": 2.5066666666666665e-05,
"loss": 0.0001,
"step": 2360
},
{
"epoch": 0.948,
"grad_norm": 0.005002748221158981,
"learning_rate": 2.328888888888889e-05,
"loss": 0.0001,
"step": 2370
},
{
"epoch": 0.952,
"grad_norm": 0.0048461793921887875,
"learning_rate": 2.1511111111111114e-05,
"loss": 0.0001,
"step": 2380
},
{
"epoch": 0.956,
"grad_norm": 0.0058443606831133366,
"learning_rate": 1.9733333333333333e-05,
"loss": 0.0004,
"step": 2390
},
{
"epoch": 0.96,
"grad_norm": 0.0049603781662881374,
"learning_rate": 1.7955555555555556e-05,
"loss": 0.0002,
"step": 2400
},
{
"epoch": 0.964,
"grad_norm": 0.005183044355362654,
"learning_rate": 1.617777777777778e-05,
"loss": 0.0035,
"step": 2410
},
{
"epoch": 0.968,
"grad_norm": 0.0050833080895245075,
"learning_rate": 1.44e-05,
"loss": 0.0012,
"step": 2420
},
{
"epoch": 0.972,
"grad_norm": 0.004844759125262499,
"learning_rate": 1.2622222222222224e-05,
"loss": 0.0001,
"step": 2430
},
{
"epoch": 0.976,
"grad_norm": 0.004592160694301128,
"learning_rate": 1.0844444444444445e-05,
"loss": 0.0012,
"step": 2440
},
{
"epoch": 0.98,
"grad_norm": 0.004524698480963707,
"learning_rate": 9.066666666666667e-06,
"loss": 0.0001,
"step": 2450
},
{
"epoch": 0.984,
"grad_norm": 0.005091739818453789,
"learning_rate": 7.288888888888889e-06,
"loss": 0.0002,
"step": 2460
},
{
"epoch": 0.988,
"grad_norm": 0.004614758305251598,
"learning_rate": 5.511111111111111e-06,
"loss": 0.0001,
"step": 2470
},
{
"epoch": 0.992,
"grad_norm": 0.010195998474955559,
"learning_rate": 3.7333333333333337e-06,
"loss": 0.0003,
"step": 2480
},
{
"epoch": 0.996,
"grad_norm": 0.004264904651790857,
"learning_rate": 1.9555555555555556e-06,
"loss": 0.0002,
"step": 2490
},
{
"epoch": 1.0,
"grad_norm": 0.004516505636274815,
"learning_rate": 1.777777777777778e-07,
"loss": 0.0002,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.999875,
"eval_loss": 0.0004970150184817612,
"eval_runtime": 2540.0992,
"eval_samples_per_second": 15.747,
"eval_steps_per_second": 0.984,
"step": 2500
}
],
"logging_steps": 10,
"max_steps": 2500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.0090762496e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}