{
"best_global_step": 2000,
"best_metric": 1.5431796145591616,
"best_model_checkpoint": "./SALAMA_NEW8/checkpoint-2000",
"epoch": 0.798881565807869,
"eval_steps": 2000,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003994407829039345,
"grad_norm": 2.2177462577819824,
"learning_rate": 1.8e-07,
"loss": 0.022,
"step": 10
},
{
"epoch": 0.00798881565807869,
"grad_norm": 2.724672794342041,
"learning_rate": 3.8e-07,
"loss": 0.0249,
"step": 20
},
{
"epoch": 0.011983223487118035,
"grad_norm": 2.5243964195251465,
"learning_rate": 5.800000000000001e-07,
"loss": 0.0215,
"step": 30
},
{
"epoch": 0.01597763131615738,
"grad_norm": 2.4517173767089844,
"learning_rate": 7.8e-07,
"loss": 0.0229,
"step": 40
},
{
"epoch": 0.019972039145196723,
"grad_norm": 1.1104662418365479,
"learning_rate": 9.800000000000001e-07,
"loss": 0.0212,
"step": 50
},
{
"epoch": 0.02396644697423607,
"grad_norm": 3.2835190296173096,
"learning_rate": 1.1800000000000001e-06,
"loss": 0.0177,
"step": 60
},
{
"epoch": 0.027960854803275415,
"grad_norm": 2.933971405029297,
"learning_rate": 1.3800000000000001e-06,
"loss": 0.0181,
"step": 70
},
{
"epoch": 0.03195526263231476,
"grad_norm": 2.667775869369507,
"learning_rate": 1.5800000000000001e-06,
"loss": 0.0243,
"step": 80
},
{
"epoch": 0.035949670461354104,
"grad_norm": 2.8503150939941406,
"learning_rate": 1.7800000000000001e-06,
"loss": 0.0158,
"step": 90
},
{
"epoch": 0.039944078290393446,
"grad_norm": 2.6173582077026367,
"learning_rate": 1.98e-06,
"loss": 0.0217,
"step": 100
},
{
"epoch": 0.043938486119432796,
"grad_norm": 2.148167371749878,
"learning_rate": 2.1800000000000003e-06,
"loss": 0.0216,
"step": 110
},
{
"epoch": 0.04793289394847214,
"grad_norm": 1.6248950958251953,
"learning_rate": 2.38e-06,
"loss": 0.0211,
"step": 120
},
{
"epoch": 0.05192730177751148,
"grad_norm": 2.5831820964813232,
"learning_rate": 2.5800000000000003e-06,
"loss": 0.0282,
"step": 130
},
{
"epoch": 0.05592170960655083,
"grad_norm": 2.1337149143218994,
"learning_rate": 2.7800000000000005e-06,
"loss": 0.0215,
"step": 140
},
{
"epoch": 0.05991611743559017,
"grad_norm": 2.95695161819458,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.0177,
"step": 150
},
{
"epoch": 0.06391052526462952,
"grad_norm": 2.277665853500366,
"learning_rate": 3.1800000000000005e-06,
"loss": 0.0217,
"step": 160
},
{
"epoch": 0.06790493309366886,
"grad_norm": 2.531909704208374,
"learning_rate": 3.3800000000000007e-06,
"loss": 0.0375,
"step": 170
},
{
"epoch": 0.07189934092270821,
"grad_norm": 2.5161631107330322,
"learning_rate": 3.58e-06,
"loss": 0.0262,
"step": 180
},
{
"epoch": 0.07589374875174755,
"grad_norm": 2.9737093448638916,
"learning_rate": 3.7800000000000002e-06,
"loss": 0.0237,
"step": 190
},
{
"epoch": 0.07988815658078689,
"grad_norm": 2.2957046031951904,
"learning_rate": 3.980000000000001e-06,
"loss": 0.0211,
"step": 200
},
{
"epoch": 0.08388256440982625,
"grad_norm": 1.9400784969329834,
"learning_rate": 4.18e-06,
"loss": 0.0241,
"step": 210
},
{
"epoch": 0.08787697223886559,
"grad_norm": 3.7293314933776855,
"learning_rate": 4.38e-06,
"loss": 0.0263,
"step": 220
},
{
"epoch": 0.09187138006790493,
"grad_norm": 4.266306400299072,
"learning_rate": 4.58e-06,
"loss": 0.0387,
"step": 230
},
{
"epoch": 0.09586578789694428,
"grad_norm": 2.510282278060913,
"learning_rate": 4.78e-06,
"loss": 0.029,
"step": 240
},
{
"epoch": 0.09986019572598362,
"grad_norm": 1.4938842058181763,
"learning_rate": 4.980000000000001e-06,
"loss": 0.0154,
"step": 250
},
{
"epoch": 0.10385460355502296,
"grad_norm": 2.0076608657836914,
"learning_rate": 5.18e-06,
"loss": 0.0263,
"step": 260
},
{
"epoch": 0.10784901138406232,
"grad_norm": 2.252916097640991,
"learning_rate": 5.380000000000001e-06,
"loss": 0.0273,
"step": 270
},
{
"epoch": 0.11184341921310166,
"grad_norm": 0.9760869145393372,
"learning_rate": 5.580000000000001e-06,
"loss": 0.0227,
"step": 280
},
{
"epoch": 0.115837827042141,
"grad_norm": 1.7485207319259644,
"learning_rate": 5.78e-06,
"loss": 0.0234,
"step": 290
},
{
"epoch": 0.11983223487118035,
"grad_norm": 2.540903329849243,
"learning_rate": 5.98e-06,
"loss": 0.0216,
"step": 300
},
{
"epoch": 0.12382664270021969,
"grad_norm": 2.970639228820801,
"learning_rate": 6.18e-06,
"loss": 0.0244,
"step": 310
},
{
"epoch": 0.12782105052925904,
"grad_norm": 4.563722610473633,
"learning_rate": 6.380000000000001e-06,
"loss": 0.0332,
"step": 320
},
{
"epoch": 0.1318154583582984,
"grad_norm": 3.670563220977783,
"learning_rate": 6.5800000000000005e-06,
"loss": 0.0259,
"step": 330
},
{
"epoch": 0.13580986618733773,
"grad_norm": 3.6714515686035156,
"learning_rate": 6.780000000000001e-06,
"loss": 0.032,
"step": 340
},
{
"epoch": 0.13980427401637707,
"grad_norm": 2.029430389404297,
"learning_rate": 6.98e-06,
"loss": 0.0264,
"step": 350
},
{
"epoch": 0.14379868184541642,
"grad_norm": 2.609748363494873,
"learning_rate": 7.180000000000001e-06,
"loss": 0.0369,
"step": 360
},
{
"epoch": 0.14779308967445576,
"grad_norm": 1.438359260559082,
"learning_rate": 7.3800000000000005e-06,
"loss": 0.0242,
"step": 370
},
{
"epoch": 0.1517874975034951,
"grad_norm": 2.1897497177124023,
"learning_rate": 7.58e-06,
"loss": 0.02,
"step": 380
},
{
"epoch": 0.15578190533253444,
"grad_norm": 1.9179186820983887,
"learning_rate": 7.78e-06,
"loss": 0.0366,
"step": 390
},
{
"epoch": 0.15977631316157379,
"grad_norm": 2.6540606021881104,
"learning_rate": 7.980000000000002e-06,
"loss": 0.0253,
"step": 400
},
{
"epoch": 0.16377072099061313,
"grad_norm": 2.5292742252349854,
"learning_rate": 8.18e-06,
"loss": 0.0356,
"step": 410
},
{
"epoch": 0.1677651288196525,
"grad_norm": 2.4729959964752197,
"learning_rate": 8.380000000000001e-06,
"loss": 0.0327,
"step": 420
},
{
"epoch": 0.17175953664869184,
"grad_norm": 3.639272689819336,
"learning_rate": 8.580000000000001e-06,
"loss": 0.032,
"step": 430
},
{
"epoch": 0.17575394447773118,
"grad_norm": 3.2385575771331787,
"learning_rate": 8.78e-06,
"loss": 0.0359,
"step": 440
},
{
"epoch": 0.17974835230677053,
"grad_norm": 3.476755142211914,
"learning_rate": 8.98e-06,
"loss": 0.0286,
"step": 450
},
{
"epoch": 0.18374276013580987,
"grad_norm": 2.612086772918701,
"learning_rate": 9.180000000000002e-06,
"loss": 0.0228,
"step": 460
},
{
"epoch": 0.1877371679648492,
"grad_norm": 3.5403027534484863,
"learning_rate": 9.38e-06,
"loss": 0.0243,
"step": 470
},
{
"epoch": 0.19173157579388855,
"grad_norm": 2.7765207290649414,
"learning_rate": 9.58e-06,
"loss": 0.0348,
"step": 480
},
{
"epoch": 0.1957259836229279,
"grad_norm": 3.589587450027466,
"learning_rate": 9.780000000000001e-06,
"loss": 0.0323,
"step": 490
},
{
"epoch": 0.19972039145196724,
"grad_norm": 1.8719650506973267,
"learning_rate": 9.980000000000001e-06,
"loss": 0.0275,
"step": 500
},
{
"epoch": 0.20371479928100658,
"grad_norm": 2.750931739807129,
"learning_rate": 9.983636363636364e-06,
"loss": 0.0348,
"step": 510
},
{
"epoch": 0.20770920711004592,
"grad_norm": 1.9383965730667114,
"learning_rate": 9.965454545454546e-06,
"loss": 0.0282,
"step": 520
},
{
"epoch": 0.2117036149390853,
"grad_norm": 3.6504414081573486,
"learning_rate": 9.947272727272728e-06,
"loss": 0.0316,
"step": 530
},
{
"epoch": 0.21569802276812464,
"grad_norm": 3.26383376121521,
"learning_rate": 9.92909090909091e-06,
"loss": 0.0273,
"step": 540
},
{
"epoch": 0.21969243059716398,
"grad_norm": 4.769904613494873,
"learning_rate": 9.910909090909092e-06,
"loss": 0.0295,
"step": 550
},
{
"epoch": 0.22368683842620332,
"grad_norm": 1.143069863319397,
"learning_rate": 9.892727272727273e-06,
"loss": 0.027,
"step": 560
},
{
"epoch": 0.22768124625524266,
"grad_norm": 2.988460063934326,
"learning_rate": 9.874545454545455e-06,
"loss": 0.0349,
"step": 570
},
{
"epoch": 0.231675654084282,
"grad_norm": 2.6448323726654053,
"learning_rate": 9.856363636363637e-06,
"loss": 0.036,
"step": 580
},
{
"epoch": 0.23567006191332135,
"grad_norm": 3.077092409133911,
"learning_rate": 9.838181818181819e-06,
"loss": 0.0491,
"step": 590
},
{
"epoch": 0.2396644697423607,
"grad_norm": 6.214341163635254,
"learning_rate": 9.820000000000001e-06,
"loss": 0.0313,
"step": 600
},
{
"epoch": 0.24365887757140003,
"grad_norm": 3.1780426502227783,
"learning_rate": 9.801818181818183e-06,
"loss": 0.0251,
"step": 610
},
{
"epoch": 0.24765328540043938,
"grad_norm": 1.7672063112258911,
"learning_rate": 9.783636363636365e-06,
"loss": 0.0259,
"step": 620
},
{
"epoch": 0.2516476932294787,
"grad_norm": 3.9317915439605713,
"learning_rate": 9.765454545454546e-06,
"loss": 0.0263,
"step": 630
},
{
"epoch": 0.2556421010585181,
"grad_norm": 2.23710298538208,
"learning_rate": 9.747272727272728e-06,
"loss": 0.0335,
"step": 640
},
{
"epoch": 0.2596365088875574,
"grad_norm": 3.24867844581604,
"learning_rate": 9.72909090909091e-06,
"loss": 0.0383,
"step": 650
},
{
"epoch": 0.2636309167165968,
"grad_norm": 3.0995874404907227,
"learning_rate": 9.710909090909092e-06,
"loss": 0.0296,
"step": 660
},
{
"epoch": 0.2676253245456361,
"grad_norm": 1.4767210483551025,
"learning_rate": 9.692727272727274e-06,
"loss": 0.04,
"step": 670
},
{
"epoch": 0.27161973237467546,
"grad_norm": 1.7182132005691528,
"learning_rate": 9.674545454545456e-06,
"loss": 0.0294,
"step": 680
},
{
"epoch": 0.2756141402037148,
"grad_norm": 3.0770034790039062,
"learning_rate": 9.656363636363637e-06,
"loss": 0.0393,
"step": 690
},
{
"epoch": 0.27960854803275415,
"grad_norm": 2.452723503112793,
"learning_rate": 9.63818181818182e-06,
"loss": 0.0321,
"step": 700
},
{
"epoch": 0.2836029558617935,
"grad_norm": 2.9560225009918213,
"learning_rate": 9.620000000000001e-06,
"loss": 0.0292,
"step": 710
},
{
"epoch": 0.28759736369083283,
"grad_norm": 2.8095853328704834,
"learning_rate": 9.601818181818183e-06,
"loss": 0.028,
"step": 720
},
{
"epoch": 0.2915917715198722,
"grad_norm": 3.4851746559143066,
"learning_rate": 9.583636363636365e-06,
"loss": 0.0296,
"step": 730
},
{
"epoch": 0.2955861793489115,
"grad_norm": 4.2011027336120605,
"learning_rate": 9.565454545454547e-06,
"loss": 0.0353,
"step": 740
},
{
"epoch": 0.2995805871779509,
"grad_norm": 5.23146390914917,
"learning_rate": 9.547272727272728e-06,
"loss": 0.0393,
"step": 750
},
{
"epoch": 0.3035749950069902,
"grad_norm": 2.8136441707611084,
"learning_rate": 9.52909090909091e-06,
"loss": 0.0339,
"step": 760
},
{
"epoch": 0.30756940283602957,
"grad_norm": 3.0712337493896484,
"learning_rate": 9.510909090909092e-06,
"loss": 0.0414,
"step": 770
},
{
"epoch": 0.3115638106650689,
"grad_norm": 2.1798360347747803,
"learning_rate": 9.492727272727274e-06,
"loss": 0.03,
"step": 780
},
{
"epoch": 0.31555821849410826,
"grad_norm": 3.7893567085266113,
"learning_rate": 9.474545454545456e-06,
"loss": 0.0325,
"step": 790
},
{
"epoch": 0.31955262632314757,
"grad_norm": 2.0599608421325684,
"learning_rate": 9.456363636363638e-06,
"loss": 0.0389,
"step": 800
},
{
"epoch": 0.32354703415218694,
"grad_norm": 2.7613537311553955,
"learning_rate": 9.438181818181818e-06,
"loss": 0.0284,
"step": 810
},
{
"epoch": 0.32754144198122626,
"grad_norm": 2.630587577819824,
"learning_rate": 9.42e-06,
"loss": 0.0308,
"step": 820
},
{
"epoch": 0.3315358498102656,
"grad_norm": 2.8372304439544678,
"learning_rate": 9.401818181818183e-06,
"loss": 0.029,
"step": 830
},
{
"epoch": 0.335530257639305,
"grad_norm": 2.21663236618042,
"learning_rate": 9.383636363636365e-06,
"loss": 0.0367,
"step": 840
},
{
"epoch": 0.3395246654683443,
"grad_norm": 3.325446367263794,
"learning_rate": 9.365454545454547e-06,
"loss": 0.0256,
"step": 850
},
{
"epoch": 0.3435190732973837,
"grad_norm": 2.7013704776763916,
"learning_rate": 9.347272727272729e-06,
"loss": 0.0279,
"step": 860
},
{
"epoch": 0.347513481126423,
"grad_norm": 1.6712983846664429,
"learning_rate": 9.32909090909091e-06,
"loss": 0.0256,
"step": 870
},
{
"epoch": 0.35150788895546237,
"grad_norm": 3.0498671531677246,
"learning_rate": 9.310909090909092e-06,
"loss": 0.0351,
"step": 880
},
{
"epoch": 0.3555022967845017,
"grad_norm": 2.060898542404175,
"learning_rate": 9.292727272727272e-06,
"loss": 0.0349,
"step": 890
},
{
"epoch": 0.35949670461354105,
"grad_norm": 2.191037178039551,
"learning_rate": 9.274545454545454e-06,
"loss": 0.0236,
"step": 900
},
{
"epoch": 0.36349111244258037,
"grad_norm": 2.7442283630371094,
"learning_rate": 9.256363636363636e-06,
"loss": 0.0339,
"step": 910
},
{
"epoch": 0.36748552027161974,
"grad_norm": 2.151813268661499,
"learning_rate": 9.23818181818182e-06,
"loss": 0.0272,
"step": 920
},
{
"epoch": 0.37147992810065905,
"grad_norm": 3.0903069972991943,
"learning_rate": 9.220000000000002e-06,
"loss": 0.0343,
"step": 930
},
{
"epoch": 0.3754743359296984,
"grad_norm": 3.2857322692871094,
"learning_rate": 9.201818181818183e-06,
"loss": 0.028,
"step": 940
},
{
"epoch": 0.3794687437587378,
"grad_norm": 2.7144224643707275,
"learning_rate": 9.183636363636365e-06,
"loss": 0.0224,
"step": 950
},
{
"epoch": 0.3834631515877771,
"grad_norm": 2.985253095626831,
"learning_rate": 9.165454545454547e-06,
"loss": 0.0297,
"step": 960
},
{
"epoch": 0.3874575594168165,
"grad_norm": 2.6801540851593018,
"learning_rate": 9.147272727272727e-06,
"loss": 0.0306,
"step": 970
},
{
"epoch": 0.3914519672458558,
"grad_norm": 2.944761276245117,
"learning_rate": 9.129090909090909e-06,
"loss": 0.0324,
"step": 980
},
{
"epoch": 0.39544637507489516,
"grad_norm": 2.6776340007781982,
"learning_rate": 9.11090909090909e-06,
"loss": 0.04,
"step": 990
},
{
"epoch": 0.3994407829039345,
"grad_norm": 4.007734775543213,
"learning_rate": 9.092727272727273e-06,
"loss": 0.0286,
"step": 1000
},
{
"epoch": 0.40343519073297385,
"grad_norm": 3.652127265930176,
"learning_rate": 9.074545454545455e-06,
"loss": 0.0373,
"step": 1010
},
{
"epoch": 0.40742959856201316,
"grad_norm": 1.8009936809539795,
"learning_rate": 9.056363636363638e-06,
"loss": 0.024,
"step": 1020
},
{
"epoch": 0.41142400639105253,
"grad_norm": 2.0933914184570312,
"learning_rate": 9.03818181818182e-06,
"loss": 0.0341,
"step": 1030
},
{
"epoch": 0.41541841422009185,
"grad_norm": 2.683140277862549,
"learning_rate": 9.020000000000002e-06,
"loss": 0.0473,
"step": 1040
},
{
"epoch": 0.4194128220491312,
"grad_norm": 2.7582523822784424,
"learning_rate": 9.001818181818182e-06,
"loss": 0.0322,
"step": 1050
},
{
"epoch": 0.4234072298781706,
"grad_norm": 3.6831445693969727,
"learning_rate": 8.983636363636364e-06,
"loss": 0.0382,
"step": 1060
},
{
"epoch": 0.4274016377072099,
"grad_norm": 2.053619623184204,
"learning_rate": 8.965454545454546e-06,
"loss": 0.0304,
"step": 1070
},
{
"epoch": 0.4313960455362493,
"grad_norm": 2.5573840141296387,
"learning_rate": 8.947272727272727e-06,
"loss": 0.0567,
"step": 1080
},
{
"epoch": 0.4353904533652886,
"grad_norm": 3.5455336570739746,
"learning_rate": 8.92909090909091e-06,
"loss": 0.0338,
"step": 1090
},
{
"epoch": 0.43938486119432796,
"grad_norm": 2.8842337131500244,
"learning_rate": 8.910909090909091e-06,
"loss": 0.0314,
"step": 1100
},
{
"epoch": 0.4433792690233673,
"grad_norm": 2.779266834259033,
"learning_rate": 8.892727272727275e-06,
"loss": 0.0486,
"step": 1110
},
{
"epoch": 0.44737367685240664,
"grad_norm": 3.959299325942993,
"learning_rate": 8.874545454545456e-06,
"loss": 0.0396,
"step": 1120
},
{
"epoch": 0.45136808468144596,
"grad_norm": 2.053091526031494,
"learning_rate": 8.856363636363637e-06,
"loss": 0.0329,
"step": 1130
},
{
"epoch": 0.45536249251048533,
"grad_norm": 1.1432011127471924,
"learning_rate": 8.838181818181818e-06,
"loss": 0.0268,
"step": 1140
},
{
"epoch": 0.45935690033952464,
"grad_norm": 2.738510847091675,
"learning_rate": 8.82e-06,
"loss": 0.0388,
"step": 1150
},
{
"epoch": 0.463351308168564,
"grad_norm": 1.9435960054397583,
"learning_rate": 8.801818181818182e-06,
"loss": 0.0321,
"step": 1160
},
{
"epoch": 0.46734571599760333,
"grad_norm": 2.462301015853882,
"learning_rate": 8.783636363636364e-06,
"loss": 0.0344,
"step": 1170
},
{
"epoch": 0.4713401238266427,
"grad_norm": 2.8463451862335205,
"learning_rate": 8.765454545454546e-06,
"loss": 0.0372,
"step": 1180
},
{
"epoch": 0.47533453165568207,
"grad_norm": 4.407367706298828,
"learning_rate": 8.747272727272728e-06,
"loss": 0.0399,
"step": 1190
},
{
"epoch": 0.4793289394847214,
"grad_norm": 2.5179121494293213,
"learning_rate": 8.72909090909091e-06,
"loss": 0.0331,
"step": 1200
},
{
"epoch": 0.48332334731376075,
"grad_norm": 3.2438509464263916,
"learning_rate": 8.710909090909091e-06,
"loss": 0.0348,
"step": 1210
},
{
"epoch": 0.48731775514280007,
"grad_norm": 2.54004168510437,
"learning_rate": 8.692727272727273e-06,
"loss": 0.0317,
"step": 1220
},
{
"epoch": 0.49131216297183944,
"grad_norm": 2.8079185485839844,
"learning_rate": 8.674545454545455e-06,
"loss": 0.0386,
"step": 1230
},
{
"epoch": 0.49530657080087875,
"grad_norm": 4.827033519744873,
"learning_rate": 8.656363636363637e-06,
"loss": 0.0338,
"step": 1240
},
{
"epoch": 0.4993009786299181,
"grad_norm": 2.919968843460083,
"learning_rate": 8.638181818181819e-06,
"loss": 0.0265,
"step": 1250
},
{
"epoch": 0.5032953864589574,
"grad_norm": 3.6935982704162598,
"learning_rate": 8.62e-06,
"loss": 0.0327,
"step": 1260
},
{
"epoch": 0.5072897942879968,
"grad_norm": 4.243749141693115,
"learning_rate": 8.601818181818182e-06,
"loss": 0.0282,
"step": 1270
},
{
"epoch": 0.5112842021170362,
"grad_norm": 3.979485273361206,
"learning_rate": 8.583636363636364e-06,
"loss": 0.0242,
"step": 1280
},
{
"epoch": 0.5152786099460755,
"grad_norm": 3.32183575630188,
"learning_rate": 8.565454545454546e-06,
"loss": 0.0356,
"step": 1290
},
{
"epoch": 0.5192730177751148,
"grad_norm": 2.474187135696411,
"learning_rate": 8.547272727272728e-06,
"loss": 0.0201,
"step": 1300
},
{
"epoch": 0.5232674256041542,
"grad_norm": 3.150151252746582,
"learning_rate": 8.52909090909091e-06,
"loss": 0.0304,
"step": 1310
},
{
"epoch": 0.5272618334331935,
"grad_norm": 2.039504289627075,
"learning_rate": 8.510909090909092e-06,
"loss": 0.045,
"step": 1320
},
{
"epoch": 0.5312562412622329,
"grad_norm": 2.7388851642608643,
"learning_rate": 8.492727272727273e-06,
"loss": 0.0268,
"step": 1330
},
{
"epoch": 0.5352506490912722,
"grad_norm": 2.9932122230529785,
"learning_rate": 8.474545454545455e-06,
"loss": 0.0331,
"step": 1340
},
{
"epoch": 0.5392450569203115,
"grad_norm": 3.3818037509918213,
"learning_rate": 8.456363636363637e-06,
"loss": 0.0365,
"step": 1350
},
{
"epoch": 0.5432394647493509,
"grad_norm": 3.251274585723877,
"learning_rate": 8.438181818181819e-06,
"loss": 0.0395,
"step": 1360
},
{
"epoch": 0.5472338725783903,
"grad_norm": 3.46567964553833,
"learning_rate": 8.42e-06,
"loss": 0.0287,
"step": 1370
},
{
"epoch": 0.5512282804074295,
"grad_norm": 2.459820032119751,
"learning_rate": 8.401818181818183e-06,
"loss": 0.0329,
"step": 1380
},
{
"epoch": 0.5552226882364689,
"grad_norm": 1.735729694366455,
"learning_rate": 8.383636363636364e-06,
"loss": 0.0313,
"step": 1390
},
{
"epoch": 0.5592170960655083,
"grad_norm": 2.2887370586395264,
"learning_rate": 8.365454545454546e-06,
"loss": 0.0215,
"step": 1400
},
{
"epoch": 0.5632115038945477,
"grad_norm": 3.110576868057251,
"learning_rate": 8.347272727272728e-06,
"loss": 0.0213,
"step": 1410
},
{
"epoch": 0.567205911723587,
"grad_norm": 3.1144895553588867,
"learning_rate": 8.32909090909091e-06,
"loss": 0.0312,
"step": 1420
},
{
"epoch": 0.5712003195526263,
"grad_norm": 2.9777989387512207,
"learning_rate": 8.310909090909092e-06,
"loss": 0.0322,
"step": 1430
},
{
"epoch": 0.5751947273816657,
"grad_norm": 2.2770936489105225,
"learning_rate": 8.292727272727274e-06,
"loss": 0.0304,
"step": 1440
},
{
"epoch": 0.579189135210705,
"grad_norm": 3.5356180667877197,
"learning_rate": 8.274545454545455e-06,
"loss": 0.0347,
"step": 1450
},
{
"epoch": 0.5831835430397444,
"grad_norm": 3.6384565830230713,
"learning_rate": 8.256363636363637e-06,
"loss": 0.0289,
"step": 1460
},
{
"epoch": 0.5871779508687837,
"grad_norm": 3.053424119949341,
"learning_rate": 8.238181818181819e-06,
"loss": 0.0356,
"step": 1470
},
{
"epoch": 0.591172358697823,
"grad_norm": 4.9105963706970215,
"learning_rate": 8.220000000000001e-06,
"loss": 0.0315,
"step": 1480
},
{
"epoch": 0.5951667665268624,
"grad_norm": 3.0485212802886963,
"learning_rate": 8.201818181818183e-06,
"loss": 0.0298,
"step": 1490
},
{
"epoch": 0.5991611743559018,
"grad_norm": 3.3632636070251465,
"learning_rate": 8.183636363636365e-06,
"loss": 0.0268,
"step": 1500
},
{
"epoch": 0.603155582184941,
"grad_norm": 2.593235969543457,
"learning_rate": 8.165454545454546e-06,
"loss": 0.0286,
"step": 1510
},
{
"epoch": 0.6071499900139804,
"grad_norm": 2.542865753173828,
"learning_rate": 8.147272727272728e-06,
"loss": 0.0317,
"step": 1520
},
{
"epoch": 0.6111443978430198,
"grad_norm": 3.246321201324463,
"learning_rate": 8.12909090909091e-06,
"loss": 0.0358,
"step": 1530
},
{
"epoch": 0.6151388056720591,
"grad_norm": 4.592155456542969,
"learning_rate": 8.110909090909092e-06,
"loss": 0.0295,
"step": 1540
},
{
"epoch": 0.6191332135010985,
"grad_norm": 2.1040351390838623,
"learning_rate": 8.092727272727274e-06,
"loss": 0.044,
"step": 1550
},
{
"epoch": 0.6231276213301378,
"grad_norm": 2.832470417022705,
"learning_rate": 8.074545454545456e-06,
"loss": 0.0335,
"step": 1560
},
{
"epoch": 0.6271220291591771,
"grad_norm": 3.7737035751342773,
"learning_rate": 8.056363636363636e-06,
"loss": 0.0289,
"step": 1570
},
{
"epoch": 0.6311164369882165,
"grad_norm": 2.9322657585144043,
"learning_rate": 8.038181818181818e-06,
"loss": 0.0209,
"step": 1580
},
{
"epoch": 0.6351108448172559,
"grad_norm": 3.0506591796875,
"learning_rate": 8.020000000000001e-06,
"loss": 0.0277,
"step": 1590
},
{
"epoch": 0.6391052526462951,
"grad_norm": 1.679126501083374,
"learning_rate": 8.001818181818183e-06,
"loss": 0.0313,
"step": 1600
},
{
"epoch": 0.6430996604753345,
"grad_norm": 1.4025191068649292,
"learning_rate": 7.983636363636365e-06,
"loss": 0.0342,
"step": 1610
},
{
"epoch": 0.6470940683043739,
"grad_norm": 3.2698376178741455,
"learning_rate": 7.965454545454547e-06,
"loss": 0.0334,
"step": 1620
},
{
"epoch": 0.6510884761334133,
"grad_norm": 2.007560968399048,
"learning_rate": 7.947272727272728e-06,
"loss": 0.0361,
"step": 1630
},
{
"epoch": 0.6550828839624525,
"grad_norm": 3.021299123764038,
"learning_rate": 7.92909090909091e-06,
"loss": 0.0316,
"step": 1640
},
{
"epoch": 0.6590772917914919,
"grad_norm": 3.4559459686279297,
"learning_rate": 7.91090909090909e-06,
"loss": 0.0336,
"step": 1650
},
{
"epoch": 0.6630716996205313,
"grad_norm": 3.786959648132324,
"learning_rate": 7.892727272727272e-06,
"loss": 0.0337,
"step": 1660
},
{
"epoch": 0.6670661074495706,
"grad_norm": 2.5222368240356445,
"learning_rate": 7.874545454545454e-06,
"loss": 0.026,
"step": 1670
},
{
"epoch": 0.67106051527861,
"grad_norm": 2.880535364151001,
"learning_rate": 7.856363636363638e-06,
"loss": 0.023,
"step": 1680
},
{
"epoch": 0.6750549231076493,
"grad_norm": 3.375427007675171,
"learning_rate": 7.83818181818182e-06,
"loss": 0.025,
"step": 1690
},
{
"epoch": 0.6790493309366886,
"grad_norm": 1.8976999521255493,
"learning_rate": 7.820000000000001e-06,
"loss": 0.0267,
"step": 1700
},
{
"epoch": 0.683043738765728,
"grad_norm": 3.0969624519348145,
"learning_rate": 7.801818181818183e-06,
"loss": 0.0334,
"step": 1710
},
{
"epoch": 0.6870381465947674,
"grad_norm": 2.949564218521118,
"learning_rate": 7.783636363636365e-06,
"loss": 0.0317,
"step": 1720
},
{
"epoch": 0.6910325544238066,
"grad_norm": 3.1288681030273438,
"learning_rate": 7.765454545454545e-06,
"loss": 0.036,
"step": 1730
},
{
"epoch": 0.695026962252846,
"grad_norm": 2.337353229522705,
"learning_rate": 7.747272727272727e-06,
"loss": 0.0233,
"step": 1740
},
{
"epoch": 0.6990213700818854,
"grad_norm": 2.593578577041626,
"learning_rate": 7.729090909090909e-06,
"loss": 0.0338,
"step": 1750
},
{
"epoch": 0.7030157779109247,
"grad_norm": 2.1465232372283936,
"learning_rate": 7.71090909090909e-06,
"loss": 0.0289,
"step": 1760
},
{
"epoch": 0.7070101857399641,
"grad_norm": 2.932419538497925,
"learning_rate": 7.692727272727273e-06,
"loss": 0.0263,
"step": 1770
},
{
"epoch": 0.7110045935690034,
"grad_norm": 4.221918106079102,
"learning_rate": 7.674545454545456e-06,
"loss": 0.0262,
"step": 1780
},
{
"epoch": 0.7149990013980427,
"grad_norm": 3.3574588298797607,
"learning_rate": 7.656363636363638e-06,
"loss": 0.0298,
"step": 1790
},
{
"epoch": 0.7189934092270821,
"grad_norm": 3.61956787109375,
"learning_rate": 7.63818181818182e-06,
"loss": 0.0361,
"step": 1800
},
{
"epoch": 0.7229878170561215,
"grad_norm": 3.8640527725219727,
"learning_rate": 7.620000000000001e-06,
"loss": 0.0379,
"step": 1810
},
{
"epoch": 0.7269822248851607,
"grad_norm": 3.021359920501709,
"learning_rate": 7.6018181818181826e-06,
"loss": 0.0256,
"step": 1820
},
{
"epoch": 0.7309766327142001,
"grad_norm": 3.4771692752838135,
"learning_rate": 7.583636363636364e-06,
"loss": 0.0317,
"step": 1830
},
{
"epoch": 0.7349710405432395,
"grad_norm": 1.799800992012024,
"learning_rate": 7.565454545454546e-06,
"loss": 0.0318,
"step": 1840
},
{
"epoch": 0.7389654483722788,
"grad_norm": 2.8619134426116943,
"learning_rate": 7.547272727272727e-06,
"loss": 0.0351,
"step": 1850
},
{
"epoch": 0.7429598562013181,
"grad_norm": 1.9078686237335205,
"learning_rate": 7.529090909090909e-06,
"loss": 0.0223,
"step": 1860
},
{
"epoch": 0.7469542640303575,
"grad_norm": 3.3508458137512207,
"learning_rate": 7.510909090909092e-06,
"loss": 0.0219,
"step": 1870
},
{
"epoch": 0.7509486718593968,
"grad_norm": 2.5900681018829346,
"learning_rate": 7.492727272727274e-06,
"loss": 0.0231,
"step": 1880
},
{
"epoch": 0.7549430796884362,
"grad_norm": 2.878042697906494,
"learning_rate": 7.4745454545454554e-06,
"loss": 0.0269,
"step": 1890
},
{
"epoch": 0.7589374875174756,
"grad_norm": 2.814326524734497,
"learning_rate": 7.456363636363637e-06,
"loss": 0.027,
"step": 1900
},
{
"epoch": 0.7629318953465148,
"grad_norm": 3.0135231018066406,
"learning_rate": 7.438181818181819e-06,
"loss": 0.0363,
"step": 1910
},
{
"epoch": 0.7669263031755542,
"grad_norm": 4.630500793457031,
"learning_rate": 7.420000000000001e-06,
"loss": 0.0318,
"step": 1920
},
{
"epoch": 0.7709207110045936,
"grad_norm": 2.1792924404144287,
"learning_rate": 7.401818181818182e-06,
"loss": 0.0299,
"step": 1930
},
{
"epoch": 0.774915118833633,
"grad_norm": 2.716294527053833,
"learning_rate": 7.383636363636364e-06,
"loss": 0.028,
"step": 1940
},
{
"epoch": 0.7789095266626722,
"grad_norm": 2.591440439224243,
"learning_rate": 7.365454545454546e-06,
"loss": 0.0311,
"step": 1950
},
{
"epoch": 0.7829039344917116,
"grad_norm": 1.3216569423675537,
"learning_rate": 7.3472727272727275e-06,
"loss": 0.0213,
"step": 1960
},
{
"epoch": 0.786898342320751,
"grad_norm": 1.4774867296218872,
"learning_rate": 7.32909090909091e-06,
"loss": 0.0218,
"step": 1970
},
{
"epoch": 0.7908927501497903,
"grad_norm": 2.8911545276641846,
"learning_rate": 7.310909090909092e-06,
"loss": 0.0251,
"step": 1980
},
{
"epoch": 0.7948871579788296,
"grad_norm": 4.558359146118164,
"learning_rate": 7.292727272727274e-06,
"loss": 0.023,
"step": 1990
},
{
"epoch": 0.798881565807869,
"grad_norm": 2.78124737739563,
"learning_rate": 7.274545454545456e-06,
"loss": 0.0361,
"step": 2000
},
{
"epoch": 0.798881565807869,
"eval_loss": 0.017742320895195007,
"eval_runtime": 7487.3562,
"eval_samples_per_second": 2.675,
"eval_steps_per_second": 0.334,
"eval_wer": 1.5431796145591616,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 6000,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.846946562048e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}