{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 300,
"global_step": 2250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0044444444444444444,
"grad_norm": 240.13470458984375,
"learning_rate": 2.7272727272727273e-05,
"loss": 11.9773,
"step": 10
},
{
"epoch": 0.008888888888888889,
"grad_norm": 34.34966278076172,
"learning_rate": 7.272727272727273e-05,
"loss": 9.6995,
"step": 20
},
{
"epoch": 0.013333333333333334,
"grad_norm": 22.52939224243164,
"learning_rate": 0.0001181818181818182,
"loss": 6.6389,
"step": 30
},
{
"epoch": 0.017777777777777778,
"grad_norm": 19.9622745513916,
"learning_rate": 0.00016363636363636366,
"loss": 3.7452,
"step": 40
},
{
"epoch": 0.022222222222222223,
"grad_norm": 16.391326904296875,
"learning_rate": 0.0001999995943808681,
"loss": 3.7348,
"step": 50
},
{
"epoch": 0.02666666666666667,
"grad_norm": 13.753044128417969,
"learning_rate": 0.00019998539805675495,
"loss": 3.3315,
"step": 60
},
{
"epoch": 0.03111111111111111,
"grad_norm": 12.171226501464844,
"learning_rate": 0.00019995092406646422,
"loss": 3.2473,
"step": 70
},
{
"epoch": 0.035555555555555556,
"grad_norm": 14.022675514221191,
"learning_rate": 0.00019989617940153752,
"loss": 3.1642,
"step": 80
},
{
"epoch": 0.04,
"grad_norm": 10.995219230651855,
"learning_rate": 0.0001998211751645364,
"loss": 2.9791,
"step": 90
},
{
"epoch": 0.044444444444444446,
"grad_norm": 13.217508316040039,
"learning_rate": 0.00019972592656679084,
"loss": 3.0747,
"step": 100
},
{
"epoch": 0.04888888888888889,
"grad_norm": 13.215702056884766,
"learning_rate": 0.0001996104529253142,
"loss": 2.8731,
"step": 110
},
{
"epoch": 0.05333333333333334,
"grad_norm": 12.901390075683594,
"learning_rate": 0.00019947477765888558,
"loss": 2.6857,
"step": 120
},
{
"epoch": 0.057777777777777775,
"grad_norm": 10.362764358520508,
"learning_rate": 0.00019931892828330047,
"loss": 2.6396,
"step": 130
},
{
"epoch": 0.06222222222222222,
"grad_norm": 17.187591552734375,
"learning_rate": 0.00019914293640579023,
"loss": 2.4217,
"step": 140
},
{
"epoch": 0.06666666666666667,
"grad_norm": 11.482383728027344,
"learning_rate": 0.00019894683771861206,
"loss": 2.2934,
"step": 150
},
{
"epoch": 0.07111111111111111,
"grad_norm": 9.790170669555664,
"learning_rate": 0.00019873067199181033,
"loss": 2.372,
"step": 160
},
{
"epoch": 0.07555555555555556,
"grad_norm": 12.26194953918457,
"learning_rate": 0.00019849448306515094,
"loss": 2.2495,
"step": 170
},
{
"epoch": 0.08,
"grad_norm": 10.313677787780762,
"learning_rate": 0.00019823831883923046,
"loss": 2.2395,
"step": 180
},
{
"epoch": 0.08444444444444445,
"grad_norm": 12.778143882751465,
"learning_rate": 0.00019796223126576138,
"loss": 2.2095,
"step": 190
},
{
"epoch": 0.08888888888888889,
"grad_norm": 10.047595024108887,
"learning_rate": 0.00019766627633703622,
"loss": 2.1296,
"step": 200
},
{
"epoch": 0.09333333333333334,
"grad_norm": 13.92696475982666,
"learning_rate": 0.00019735051407457176,
"loss": 2.0027,
"step": 210
},
{
"epoch": 0.09777777777777778,
"grad_norm": 13.361489295959473,
"learning_rate": 0.00019701500851693635,
"loss": 1.8503,
"step": 220
},
{
"epoch": 0.10222222222222223,
"grad_norm": 11.596494674682617,
"learning_rate": 0.00019665982770676255,
"loss": 2.0362,
"step": 230
},
{
"epoch": 0.10666666666666667,
"grad_norm": 15.200052261352539,
"learning_rate": 0.00019628504367694754,
"loss": 1.9158,
"step": 240
},
{
"epoch": 0.1111111111111111,
"grad_norm": 14.349342346191406,
"learning_rate": 0.00019589073243604454,
"loss": 1.8403,
"step": 250
},
{
"epoch": 0.11555555555555555,
"grad_norm": 14.814216613769531,
"learning_rate": 0.00019547697395284766,
"loss": 1.9161,
"step": 260
},
{
"epoch": 0.12,
"grad_norm": 8.870625495910645,
"learning_rate": 0.0001950438521401738,
"loss": 1.7764,
"step": 270
},
{
"epoch": 0.12444444444444444,
"grad_norm": 10.462891578674316,
"learning_rate": 0.0001945914548378446,
"loss": 1.9073,
"step": 280
},
{
"epoch": 0.1288888888888889,
"grad_norm": 10.39869499206543,
"learning_rate": 0.00019411987379487196,
"loss": 1.7828,
"step": 290
},
{
"epoch": 0.13333333333333333,
"grad_norm": 10.365347862243652,
"learning_rate": 0.0001936292046508508,
"loss": 1.7332,
"step": 300
},
{
"epoch": 0.13333333333333333,
"eval_code_acc": 0.0,
"eval_exact_match": 0.0,
"eval_family_acc": 0.0,
"eval_loss": 1.4483628273010254,
"eval_runtime": 275.4346,
"eval_samples_per_second": 1.815,
"eval_steps_per_second": 1.815,
"eval_super_acc": 0.0,
"step": 300
},
{
"epoch": 0.13777777777777778,
"grad_norm": 12.327225685119629,
"learning_rate": 0.00019311954691656264,
"loss": 1.6749,
"step": 310
},
{
"epoch": 0.14222222222222222,
"grad_norm": 11.838521003723145,
"learning_rate": 0.00019259100395379434,
"loss": 1.6202,
"step": 320
},
{
"epoch": 0.14666666666666667,
"grad_norm": 14.03673267364502,
"learning_rate": 0.0001920436829543756,
"loss": 1.6491,
"step": 330
},
{
"epoch": 0.1511111111111111,
"grad_norm": 15.616336822509766,
"learning_rate": 0.00019147769491843978,
"loss": 1.683,
"step": 340
},
{
"epoch": 0.15555555555555556,
"grad_norm": 12.534092903137207,
"learning_rate": 0.00019089315463191234,
"loss": 1.5952,
"step": 350
},
{
"epoch": 0.16,
"grad_norm": 11.09432315826416,
"learning_rate": 0.00019029018064323165,
"loss": 1.622,
"step": 360
},
{
"epoch": 0.16444444444444445,
"grad_norm": 12.834360122680664,
"learning_rate": 0.00018966889523930656,
"loss": 1.5646,
"step": 370
},
{
"epoch": 0.1688888888888889,
"grad_norm": 25.459949493408203,
"learning_rate": 0.0001890294244207158,
"loss": 1.5177,
"step": 380
},
{
"epoch": 0.17333333333333334,
"grad_norm": 19.265300750732422,
"learning_rate": 0.0001883718978761544,
"loss": 1.5986,
"step": 390
},
{
"epoch": 0.17777777777777778,
"grad_norm": 11.887044906616211,
"learning_rate": 0.00018769644895613174,
"loss": 1.613,
"step": 400
},
{
"epoch": 0.18222222222222223,
"grad_norm": 21.538328170776367,
"learning_rate": 0.00018700321464592764,
"loss": 1.5335,
"step": 410
},
{
"epoch": 0.18666666666666668,
"grad_norm": 18.00100326538086,
"learning_rate": 0.00018629233553781052,
"loss": 1.4608,
"step": 420
},
{
"epoch": 0.19111111111111112,
"grad_norm": 14.892653465270996,
"learning_rate": 0.00018556395580252458,
"loss": 1.5059,
"step": 430
},
{
"epoch": 0.19555555555555557,
"grad_norm": 10.61614990234375,
"learning_rate": 0.000184818223160051,
"loss": 1.2732,
"step": 440
},
{
"epoch": 0.2,
"grad_norm": 11.014739990234375,
"learning_rate": 0.00018405528884964952,
"loss": 1.3921,
"step": 450
},
{
"epoch": 0.20444444444444446,
"grad_norm": 9.74208927154541,
"learning_rate": 0.00018327530759918597,
"loss": 1.3523,
"step": 460
},
{
"epoch": 0.2088888888888889,
"grad_norm": 15.92335319519043,
"learning_rate": 0.0001824784375937528,
"loss": 1.4222,
"step": 470
},
{
"epoch": 0.21333333333333335,
"grad_norm": 15.541021347045898,
"learning_rate": 0.00018166484044358764,
"loss": 1.3772,
"step": 480
},
{
"epoch": 0.21777777777777776,
"grad_norm": 13.139538764953613,
"learning_rate": 0.00018083468115129834,
"loss": 1.4395,
"step": 490
},
{
"epoch": 0.2222222222222222,
"grad_norm": 21.101469039916992,
"learning_rate": 0.00017998812807839892,
"loss": 1.3891,
"step": 500
},
{
"epoch": 0.22666666666666666,
"grad_norm": 14.130121231079102,
"learning_rate": 0.00017912535291116508,
"loss": 1.3276,
"step": 510
},
{
"epoch": 0.2311111111111111,
"grad_norm": 25.728923797607422,
"learning_rate": 0.00017824653062581503,
"loss": 1.4297,
"step": 520
},
{
"epoch": 0.23555555555555555,
"grad_norm": 14.234143257141113,
"learning_rate": 0.00017735183945302322,
"loss": 1.4121,
"step": 530
},
{
"epoch": 0.24,
"grad_norm": 14.950554847717285,
"learning_rate": 0.00017644146084177406,
"loss": 1.2576,
"step": 540
},
{
"epoch": 0.24444444444444444,
"grad_norm": 9.834321022033691,
"learning_rate": 0.00017551557942256294,
"loss": 1.3496,
"step": 550
},
{
"epoch": 0.24888888888888888,
"grad_norm": 20.81060218811035,
"learning_rate": 0.00017457438296995196,
"loss": 1.2409,
"step": 560
},
{
"epoch": 0.25333333333333335,
"grad_norm": 16.057870864868164,
"learning_rate": 0.00017361806236448817,
"loss": 1.369,
"step": 570
},
{
"epoch": 0.2577777777777778,
"grad_norm": 9.714545249938965,
"learning_rate": 0.00017264681155399164,
"loss": 1.2626,
"step": 580
},
{
"epoch": 0.26222222222222225,
"grad_norm": 16.824140548706055,
"learning_rate": 0.00017166082751422177,
"loss": 1.2978,
"step": 590
},
{
"epoch": 0.26666666666666666,
"grad_norm": 13.657912254333496,
"learning_rate": 0.00017066031020892934,
"loss": 1.1963,
"step": 600
},
{
"epoch": 0.27111111111111114,
"grad_norm": 22.651229858398438,
"learning_rate": 0.00016964546254930247,
"loss": 1.1826,
"step": 610
},
{
"epoch": 0.27555555555555555,
"grad_norm": 28.09796905517578,
"learning_rate": 0.0001686164903528152,
"loss": 1.2849,
"step": 620
},
{
"epoch": 0.28,
"grad_norm": 22.29288673400879,
"learning_rate": 0.00016757360230148618,
"loss": 1.2473,
"step": 630
},
{
"epoch": 0.28444444444444444,
"grad_norm": 29.369836807250977,
"learning_rate": 0.00016651700989955682,
"loss": 1.2133,
"step": 640
},
{
"epoch": 0.28888888888888886,
"grad_norm": 10.829903602600098,
"learning_rate": 0.00016544692743059684,
"loss": 1.2379,
"step": 650
},
{
"epoch": 0.29333333333333333,
"grad_norm": 10.857136726379395,
"learning_rate": 0.0001643635719140461,
"loss": 1.1889,
"step": 660
},
{
"epoch": 0.29777777777777775,
"grad_norm": 10.762494087219238,
"learning_rate": 0.00016326716306120195,
"loss": 1.2591,
"step": 670
},
{
"epoch": 0.3022222222222222,
"grad_norm": 13.926369667053223,
"learning_rate": 0.00016215792323066012,
"loss": 1.2075,
"step": 680
},
{
"epoch": 0.30666666666666664,
"grad_norm": 10.43800163269043,
"learning_rate": 0.00016103607738321925,
"loss": 1.2076,
"step": 690
},
{
"epoch": 0.3111111111111111,
"grad_norm": 18.675508499145508,
"learning_rate": 0.0001599018530362573,
"loss": 1.2339,
"step": 700
},
{
"epoch": 0.31555555555555553,
"grad_norm": 10.020101547241211,
"learning_rate": 0.0001587554802175895,
"loss": 1.14,
"step": 710
},
{
"epoch": 0.32,
"grad_norm": 8.47937297821045,
"learning_rate": 0.0001575971914188175,
"loss": 1.145,
"step": 720
},
{
"epoch": 0.3244444444444444,
"grad_norm": 16.2773380279541,
"learning_rate": 0.00015642722154817848,
"loss": 1.1076,
"step": 730
},
{
"epoch": 0.3288888888888889,
"grad_norm": 10.47890853881836,
"learning_rate": 0.00015524580788290425,
"loss": 1.1414,
"step": 740
},
{
"epoch": 0.3333333333333333,
"grad_norm": 9.702156066894531,
"learning_rate": 0.0001540531900211,
"loss": 1.22,
"step": 750
},
{
"epoch": 0.3377777777777778,
"grad_norm": 10.579848289489746,
"learning_rate": 0.0001528496098331523,
"loss": 1.1548,
"step": 760
},
{
"epoch": 0.3422222222222222,
"grad_norm": 24.226659774780273,
"learning_rate": 0.00015163531141267628,
"loss": 1.1407,
"step": 770
},
{
"epoch": 0.3466666666666667,
"grad_norm": 11.10332202911377,
"learning_rate": 0.00015041054102701184,
"loss": 1.1642,
"step": 780
},
{
"epoch": 0.3511111111111111,
"grad_norm": 14.13973331451416,
"learning_rate": 0.00014917554706727915,
"loss": 1.1726,
"step": 790
},
{
"epoch": 0.35555555555555557,
"grad_norm": 21.75472640991211,
"learning_rate": 0.00014793057999800335,
"loss": 1.1478,
"step": 800
},
{
"epoch": 0.36,
"grad_norm": 12.123833656311035,
"learning_rate": 0.0001466758923063189,
"loss": 1.1939,
"step": 810
},
{
"epoch": 0.36444444444444446,
"grad_norm": 9.510560035705566,
"learning_rate": 0.00014541173845076323,
"loss": 1.0843,
"step": 820
},
{
"epoch": 0.3688888888888889,
"grad_norm": 17.031314849853516,
"learning_rate": 0.00014413837480967145,
"loss": 1.1181,
"step": 830
},
{
"epoch": 0.37333333333333335,
"grad_norm": 16.022037506103516,
"learning_rate": 0.00014285605962918084,
"loss": 1.1542,
"step": 840
},
{
"epoch": 0.37777777777777777,
"grad_norm": 12.77236270904541,
"learning_rate": 0.00014156505297085713,
"loss": 1.114,
"step": 850
},
{
"epoch": 0.38222222222222224,
"grad_norm": 38.819454193115234,
"learning_rate": 0.00014026561665895224,
"loss": 1.0932,
"step": 860
},
{
"epoch": 0.38666666666666666,
"grad_norm": 10.85486125946045,
"learning_rate": 0.00013895801422730473,
"loss": 1.11,
"step": 870
},
{
"epoch": 0.39111111111111113,
"grad_norm": 10.986682891845703,
"learning_rate": 0.00013764251086589353,
"loss": 1.0752,
"step": 880
},
{
"epoch": 0.39555555555555555,
"grad_norm": 7.950289726257324,
"learning_rate": 0.00013631937336705568,
"loss": 1.1817,
"step": 890
},
{
"epoch": 0.4,
"grad_norm": 7.103327751159668,
"learning_rate": 0.00013498887007137918,
"loss": 1.0813,
"step": 900
},
{
"epoch": 0.40444444444444444,
"grad_norm": 7.365835189819336,
"learning_rate": 0.0001336512708132819,
"loss": 1.0193,
"step": 910
},
{
"epoch": 0.4088888888888889,
"grad_norm": 11.938828468322754,
"learning_rate": 0.00013230684686628744,
"loss": 1.1339,
"step": 920
},
{
"epoch": 0.41333333333333333,
"grad_norm": 12.521608352661133,
"learning_rate": 0.00013095587088800902,
"loss": 1.0743,
"step": 930
},
{
"epoch": 0.4177777777777778,
"grad_norm": 9.13135051727295,
"learning_rate": 0.00012959861686485304,
"loss": 1.0734,
"step": 940
},
{
"epoch": 0.4222222222222222,
"grad_norm": 16.696514129638672,
"learning_rate": 0.0001282353600564527,
"loss": 1.1145,
"step": 950
},
{
"epoch": 0.4266666666666667,
"grad_norm": 12.382914543151855,
"learning_rate": 0.00012686637693984384,
"loss": 0.9964,
"step": 960
},
{
"epoch": 0.4311111111111111,
"grad_norm": 10.711663246154785,
"learning_rate": 0.00012549194515339344,
"loss": 1.0572,
"step": 970
},
{
"epoch": 0.43555555555555553,
"grad_norm": 13.973264694213867,
"learning_rate": 0.00012411234344049293,
"loss": 1.0616,
"step": 980
},
{
"epoch": 0.44,
"grad_norm": 10.161416053771973,
"learning_rate": 0.0001227278515930273,
"loss": 1.0561,
"step": 990
},
{
"epoch": 0.4444444444444444,
"grad_norm": 27.74120330810547,
"learning_rate": 0.00012133875039463148,
"loss": 1.1011,
"step": 1000
},
{
"epoch": 0.4488888888888889,
"grad_norm": 9.277678489685059,
"learning_rate": 0.00011994532156374574,
"loss": 1.0957,
"step": 1010
},
{
"epoch": 0.4533333333333333,
"grad_norm": 9.599855422973633,
"learning_rate": 0.00011854784769648137,
"loss": 1.0394,
"step": 1020
},
{
"epoch": 0.4577777777777778,
"grad_norm": 9.483017921447754,
"learning_rate": 0.00011714661220930833,
"loss": 0.9773,
"step": 1030
},
{
"epoch": 0.4622222222222222,
"grad_norm": 11.467011451721191,
"learning_rate": 0.00011574189928157689,
"loss": 1.0346,
"step": 1040
},
{
"epoch": 0.4666666666666667,
"grad_norm": 8.953259468078613,
"learning_rate": 0.00011433399379788387,
"loss": 1.0622,
"step": 1050
},
{
"epoch": 0.4711111111111111,
"grad_norm": 6.459799289703369,
"learning_rate": 0.00011292318129029665,
"loss": 0.9814,
"step": 1060
},
{
"epoch": 0.47555555555555556,
"grad_norm": 8.728630065917969,
"learning_rate": 0.00011150974788044521,
"loss": 1.0526,
"step": 1070
},
{
"epoch": 0.48,
"grad_norm": 7.729814529418945,
"learning_rate": 0.00011009398022149495,
"loss": 0.9997,
"step": 1080
},
{
"epoch": 0.48444444444444446,
"grad_norm": 9.42880916595459,
"learning_rate": 0.00010867616544001164,
"loss": 0.9999,
"step": 1090
},
{
"epoch": 0.4888888888888889,
"grad_norm": 8.457280158996582,
"learning_rate": 0.00010725659107773045,
"loss": 1.0464,
"step": 1100
},
{
"epoch": 0.49333333333333335,
"grad_norm": 8.715860366821289,
"learning_rate": 0.00010583554503324044,
"loss": 1.0088,
"step": 1110
},
{
"epoch": 0.49777777777777776,
"grad_norm": 6.529873847961426,
"learning_rate": 0.00010441331550359712,
"loss": 1.0749,
"step": 1120
},
{
"epoch": 0.5022222222222222,
"grad_norm": 7.668039798736572,
"learning_rate": 0.0001029901909258742,
"loss": 1.026,
"step": 1130
},
{
"epoch": 0.5066666666666667,
"grad_norm": 8.81876277923584,
"learning_rate": 0.00010156645991866677,
"loss": 1.0293,
"step": 1140
},
{
"epoch": 0.5111111111111111,
"grad_norm": 8.55112075805664,
"learning_rate": 0.00010014241122355762,
"loss": 1.0282,
"step": 1150
},
{
"epoch": 0.5155555555555555,
"grad_norm": 9.484146118164062,
"learning_rate": 9.871833364655865e-05,
"loss": 0.9964,
"step": 1160
},
{
"epoch": 0.52,
"grad_norm": 10.939757347106934,
"learning_rate": 9.729451599953917e-05,
"loss": 1.0519,
"step": 1170
},
{
"epoch": 0.5244444444444445,
"grad_norm": 9.229081153869629,
"learning_rate": 9.587124704165302e-05,
"loss": 1.0511,
"step": 1180
},
{
"epoch": 0.5288888888888889,
"grad_norm": 12.294286727905273,
"learning_rate": 9.44488154207766e-05,
"loss": 1.0302,
"step": 1190
},
{
"epoch": 0.5333333333333333,
"grad_norm": 11.527563095092773,
"learning_rate": 9.302750961496888e-05,
"loss": 1.0333,
"step": 1200
},
{
"epoch": 0.5377777777777778,
"grad_norm": 10.483113288879395,
"learning_rate": 9.160761787396665e-05,
"loss": 0.9749,
"step": 1210
},
{
"epoch": 0.5422222222222223,
"grad_norm": 9.348003387451172,
"learning_rate": 9.018942816072545e-05,
"loss": 0.9837,
"step": 1220
},
{
"epoch": 0.5466666666666666,
"grad_norm": 9.57206916809082,
"learning_rate": 8.87732280930188e-05,
"loss": 1.0002,
"step": 1230
},
{
"epoch": 0.5511111111111111,
"grad_norm": 9.370091438293457,
"learning_rate": 8.735930488510774e-05,
"loss": 1.0049,
"step": 1240
},
{
"epoch": 0.5555555555555556,
"grad_norm": 9.066927909851074,
"learning_rate": 8.594794528949183e-05,
"loss": 0.9549,
"step": 1250
},
{
"epoch": 0.56,
"grad_norm": 13.580326080322266,
"learning_rate": 8.453943553875392e-05,
"loss": 1.0505,
"step": 1260
},
{
"epoch": 0.5644444444444444,
"grad_norm": 9.729880332946777,
"learning_rate": 8.313406128751049e-05,
"loss": 1.0413,
"step": 1270
},
{
"epoch": 0.5688888888888889,
"grad_norm": 10.354995727539062,
"learning_rate": 8.173210755447905e-05,
"loss": 1.033,
"step": 1280
},
{
"epoch": 0.5733333333333334,
"grad_norm": 10.784231185913086,
"learning_rate": 8.033385866467444e-05,
"loss": 1.0747,
"step": 1290
},
{
"epoch": 0.5777777777777777,
"grad_norm": 8.267210006713867,
"learning_rate": 7.893959819174619e-05,
"loss": 0.9777,
"step": 1300
},
{
"epoch": 0.5822222222222222,
"grad_norm": 8.181448936462402,
"learning_rate": 7.754960890046785e-05,
"loss": 0.9738,
"step": 1310
},
{
"epoch": 0.5866666666666667,
"grad_norm": 6.555637836456299,
"learning_rate": 7.616417268939037e-05,
"loss": 0.9659,
"step": 1320
},
{
"epoch": 0.5911111111111111,
"grad_norm": 8.430340766906738,
"learning_rate": 7.47835705336716e-05,
"loss": 0.999,
"step": 1330
},
{
"epoch": 0.5955555555555555,
"grad_norm": 7.698472023010254,
"learning_rate": 7.340808242809264e-05,
"loss": 0.9666,
"step": 1340
},
{
"epoch": 0.6,
"grad_norm": 6.320609092712402,
"learning_rate": 7.203798733027304e-05,
"loss": 0.9954,
"step": 1350
},
{
"epoch": 0.6044444444444445,
"grad_norm": 7.057352542877197,
"learning_rate": 7.067356310409659e-05,
"loss": 0.9971,
"step": 1360
},
{
"epoch": 0.6088888888888889,
"grad_norm": 10.81286334991455,
"learning_rate": 6.931508646335874e-05,
"loss": 0.9931,
"step": 1370
},
{
"epoch": 0.6133333333333333,
"grad_norm": 7.427656173706055,
"learning_rate": 6.796283291564722e-05,
"loss": 0.9491,
"step": 1380
},
{
"epoch": 0.6177777777777778,
"grad_norm": 7.356409072875977,
"learning_rate": 6.66170767064675e-05,
"loss": 1.0202,
"step": 1390
},
{
"epoch": 0.6222222222222222,
"grad_norm": 8.578875541687012,
"learning_rate": 6.527809076362399e-05,
"loss": 1.0542,
"step": 1400
},
{
"epoch": 0.6266666666666667,
"grad_norm": 8.644619941711426,
"learning_rate": 6.394614664186862e-05,
"loss": 1.0267,
"step": 1410
},
{
"epoch": 0.6311111111111111,
"grad_norm": 9.160662651062012,
"learning_rate": 6.262151446782785e-05,
"loss": 0.9914,
"step": 1420
},
{
"epoch": 0.6355555555555555,
"grad_norm": 7.767285346984863,
"learning_rate": 6.130446288521915e-05,
"loss": 0.987,
"step": 1430
},
{
"epoch": 0.64,
"grad_norm": 12.88818073272705,
"learning_rate": 5.999525900036855e-05,
"loss": 0.9676,
"step": 1440
},
{
"epoch": 0.6444444444444445,
"grad_norm": 10.068846702575684,
"learning_rate": 5.86941683280398e-05,
"loss": 0.9942,
"step": 1450
},
{
"epoch": 0.6488888888888888,
"grad_norm": 8.70479679107666,
"learning_rate": 5.7401454737586055e-05,
"loss": 0.9848,
"step": 1460
},
{
"epoch": 0.6533333333333333,
"grad_norm": 10.635972023010254,
"learning_rate": 5.6117380399435826e-05,
"loss": 0.9892,
"step": 1470
},
{
"epoch": 0.6577777777777778,
"grad_norm": 6.84842586517334,
"learning_rate": 5.484220573192307e-05,
"loss": 0.961,
"step": 1480
},
{
"epoch": 0.6622222222222223,
"grad_norm": 6.793154716491699,
"learning_rate": 5.3576189348472526e-05,
"loss": 0.9772,
"step": 1490
},
{
"epoch": 0.6666666666666666,
"grad_norm": 10.692822456359863,
"learning_rate": 5.231958800515164e-05,
"loss": 1.0044,
"step": 1500
},
{
"epoch": 0.6711111111111111,
"grad_norm": 7.949609279632568,
"learning_rate": 5.107265654859855e-05,
"loss": 1.0194,
"step": 1510
},
{
"epoch": 0.6755555555555556,
"grad_norm": 8.028242111206055,
"learning_rate": 4.983564786433763e-05,
"loss": 0.9705,
"step": 1520
},
{
"epoch": 0.68,
"grad_norm": 8.18526840209961,
"learning_rate": 4.860881282549285e-05,
"loss": 0.9802,
"step": 1530
},
{
"epoch": 0.6844444444444444,
"grad_norm": 9.321311950683594,
"learning_rate": 4.739240024190904e-05,
"loss": 0.9649,
"step": 1540
},
{
"epoch": 0.6888888888888889,
"grad_norm": 10.959417343139648,
"learning_rate": 4.618665680969163e-05,
"loss": 0.9957,
"step": 1550
},
{
"epoch": 0.6933333333333334,
"grad_norm": 9.302586555480957,
"learning_rate": 4.49918270611752e-05,
"loss": 0.9833,
"step": 1560
},
{
"epoch": 0.6977777777777778,
"grad_norm": 7.047448635101318,
"learning_rate": 4.380815331533088e-05,
"loss": 1.0179,
"step": 1570
},
{
"epoch": 0.7022222222222222,
"grad_norm": 9.307101249694824,
"learning_rate": 4.2635875628622345e-05,
"loss": 0.9883,
"step": 1580
},
{
"epoch": 0.7066666666666667,
"grad_norm": 8.306827545166016,
"learning_rate": 4.147523174632103e-05,
"loss": 0.984,
"step": 1590
},
{
"epoch": 0.7111111111111111,
"grad_norm": 9.073155403137207,
"learning_rate": 4.032645705428985e-05,
"loss": 0.9916,
"step": 1600
},
{
"epoch": 0.7155555555555555,
"grad_norm": 11.148294448852539,
"learning_rate": 3.9189784531245334e-05,
"loss": 0.993,
"step": 1610
},
{
"epoch": 0.72,
"grad_norm": 7.878681659698486,
"learning_rate": 3.806544470150831e-05,
"loss": 0.9733,
"step": 1620
},
{
"epoch": 0.7244444444444444,
"grad_norm": 9.204869270324707,
"learning_rate": 3.6953665588251984e-05,
"loss": 0.9689,
"step": 1630
},
{
"epoch": 0.7288888888888889,
"grad_norm": 8.391727447509766,
"learning_rate": 3.585467266725737e-05,
"loss": 0.9782,
"step": 1640
},
{
"epoch": 0.7333333333333333,
"grad_norm": 6.572085857391357,
"learning_rate": 3.4768688821185566e-05,
"loss": 0.9548,
"step": 1650
},
{
"epoch": 0.7377777777777778,
"grad_norm": 9.943083763122559,
"learning_rate": 3.3695934294375544e-05,
"loss": 0.9904,
"step": 1660
},
{
"epoch": 0.7422222222222222,
"grad_norm": 8.165312767028809,
"learning_rate": 3.263662664817728e-05,
"loss": 0.9728,
"step": 1670
},
{
"epoch": 0.7466666666666667,
"grad_norm": 9.635257720947266,
"learning_rate": 3.15909807168291e-05,
"loss": 0.961,
"step": 1680
},
{
"epoch": 0.7511111111111111,
"grad_norm": 7.636417865753174,
"learning_rate": 3.055920856388779e-05,
"loss": 0.9403,
"step": 1690
},
{
"epoch": 0.7555555555555555,
"grad_norm": 6.770568370819092,
"learning_rate": 2.95415194392207e-05,
"loss": 0.9484,
"step": 1700
},
{
"epoch": 0.76,
"grad_norm": 7.254674434661865,
"learning_rate": 2.8538119736568845e-05,
"loss": 0.9701,
"step": 1710
},
{
"epoch": 0.7644444444444445,
"grad_norm": 8.287463188171387,
"learning_rate": 2.7549212951688598e-05,
"loss": 0.9591,
"step": 1720
},
{
"epoch": 0.7688888888888888,
"grad_norm": 8.489920616149902,
"learning_rate": 2.6574999641081812e-05,
"loss": 0.9285,
"step": 1730
},
{
"epoch": 0.7733333333333333,
"grad_norm": 7.725697994232178,
"learning_rate": 2.561567738132149e-05,
"loss": 0.8912,
"step": 1740
},
{
"epoch": 0.7777777777777778,
"grad_norm": 8.986964225769043,
"learning_rate": 2.467144072898202e-05,
"loss": 0.9386,
"step": 1750
},
{
"epoch": 0.7822222222222223,
"grad_norm": 8.926631927490234,
"learning_rate": 2.3742481181182065e-05,
"loss": 0.9224,
"step": 1760
},
{
"epoch": 0.7866666666666666,
"grad_norm": 7.921815395355225,
"learning_rate": 2.2828987136747505e-05,
"loss": 0.9393,
"step": 1770
},
{
"epoch": 0.7911111111111111,
"grad_norm": 6.680901050567627,
"learning_rate": 2.193114385800309e-05,
"loss": 0.9359,
"step": 1780
},
{
"epoch": 0.7955555555555556,
"grad_norm": 6.957186698913574,
"learning_rate": 2.104913343320013e-05,
"loss": 0.9285,
"step": 1790
},
{
"epoch": 0.8,
"grad_norm": 7.6232008934021,
"learning_rate": 2.0183134739587807e-05,
"loss": 0.9083,
"step": 1800
},
{
"epoch": 0.8044444444444444,
"grad_norm": 6.6202898025512695,
"learning_rate": 1.9333323407135652e-05,
"loss": 0.9497,
"step": 1810
},
{
"epoch": 0.8088888888888889,
"grad_norm": 8.43086051940918,
"learning_rate": 1.8499871782914823e-05,
"loss": 0.8944,
"step": 1820
},
{
"epoch": 0.8133333333333334,
"grad_norm": 7.180028915405273,
"learning_rate": 1.7682948896145037e-05,
"loss": 0.9319,
"step": 1830
},
{
"epoch": 0.8177777777777778,
"grad_norm": 9.3683500289917,
"learning_rate": 1.688272042391421e-05,
"loss": 0.9467,
"step": 1840
},
{
"epoch": 0.8222222222222222,
"grad_norm": 7.685975551605225,
"learning_rate": 1.609934865757835e-05,
"loss": 0.8837,
"step": 1850
},
{
"epoch": 0.8266666666666667,
"grad_norm": 6.820009231567383,
"learning_rate": 1.5332992469847595e-05,
"loss": 0.8969,
"step": 1860
},
{
"epoch": 0.8311111111111111,
"grad_norm": 6.4122161865234375,
"learning_rate": 1.4583807282566109e-05,
"loss": 0.892,
"step": 1870
},
{
"epoch": 0.8355555555555556,
"grad_norm": 11.386307716369629,
"learning_rate": 1.3851945035191271e-05,
"loss": 0.9494,
"step": 1880
},
{
"epoch": 0.84,
"grad_norm": 7.07219934463501,
"learning_rate": 1.3137554153979648e-05,
"loss": 0.9254,
"step": 1890
},
{
"epoch": 0.8444444444444444,
"grad_norm": 8.010754585266113,
"learning_rate": 1.2440779521885026e-05,
"loss": 0.8829,
"step": 1900
},
{
"epoch": 0.8488888888888889,
"grad_norm": 7.019892692565918,
"learning_rate": 1.1761762449175362e-05,
"loss": 0.9155,
"step": 1910
},
{
"epoch": 0.8533333333333334,
"grad_norm": 9.830785751342773,
"learning_rate": 1.1100640644774174e-05,
"loss": 0.9572,
"step": 1920
},
{
"epoch": 0.8577777777777778,
"grad_norm": 8.173066139221191,
"learning_rate": 1.0457548188332156e-05,
"loss": 0.9465,
"step": 1930
},
{
"epoch": 0.8622222222222222,
"grad_norm": 7.015248775482178,
"learning_rate": 9.83261550303518e-06,
"loss": 0.8799,
"step": 1940
},
{
"epoch": 0.8666666666666667,
"grad_norm": 7.390592575073242,
"learning_rate": 9.225969329153572e-06,
"loss": 0.9065,
"step": 1950
},
{
"epoch": 0.8711111111111111,
"grad_norm": 8.248661994934082,
"learning_rate": 8.637732698338353e-06,
"loss": 0.8821,
"step": 1960
},
{
"epoch": 0.8755555555555555,
"grad_norm": 7.508656024932861,
"learning_rate": 8.068024908669658e-06,
"loss": 0.9156,
"step": 1970
},
{
"epoch": 0.88,
"grad_norm": 11.526093482971191,
"learning_rate": 7.516961500462438e-06,
"loss": 0.9091,
"step": 1980
},
{
"epoch": 0.8844444444444445,
"grad_norm": 7.947896480560303,
"learning_rate": 6.984654232833998e-06,
"loss": 0.9023,
"step": 1990
},
{
"epoch": 0.8888888888888888,
"grad_norm": 8.093976974487305,
"learning_rate": 6.471211061038695e-06,
"loss": 0.8878,
"step": 2000
},
{
"epoch": 0.8933333333333333,
"grad_norm": 11.323365211486816,
"learning_rate": 5.976736114573867e-06,
"loss": 0.9262,
"step": 2010
},
{
"epoch": 0.8977777777777778,
"grad_norm": 8.999126434326172,
"learning_rate": 5.501329676061662e-06,
"loss": 0.878,
"step": 2020
},
{
"epoch": 0.9022222222222223,
"grad_norm": 8.701828956604004,
"learning_rate": 5.045088160911227e-06,
"loss": 0.8847,
"step": 2030
},
{
"epoch": 0.9066666666666666,
"grad_norm": 6.862762451171875,
"learning_rate": 4.60810409776491e-06,
"loss": 0.8999,
"step": 2040
},
{
"epoch": 0.9111111111111111,
"grad_norm": 7.2732672691345215,
"learning_rate": 4.190466109733004e-06,
"loss": 0.9271,
"step": 2050
},
{
"epoch": 0.9155555555555556,
"grad_norm": 6.5058979988098145,
"learning_rate": 3.7922588964203533e-06,
"loss": 0.8799,
"step": 2060
},
{
"epoch": 0.92,
"grad_norm": 7.603868007659912,
"learning_rate": 3.4135632167487274e-06,
"loss": 0.9081,
"step": 2070
},
{
"epoch": 0.9244444444444444,
"grad_norm": 7.810312271118164,
"learning_rate": 3.054455872578421e-06,
"loss": 0.9159,
"step": 2080
},
{
"epoch": 0.9288888888888889,
"grad_norm": 8.112349510192871,
"learning_rate": 2.71500969313242e-06,
"loss": 0.8864,
"step": 2090
},
{
"epoch": 0.9333333333333333,
"grad_norm": 8.802750587463379,
"learning_rate": 2.3952935202260608e-06,
"loss": 0.8618,
"step": 2100
},
{
"epoch": 0.9377777777777778,
"grad_norm": 7.140369892120361,
"learning_rate": 2.095372194305578e-06,
"loss": 0.8829,
"step": 2110
},
{
"epoch": 0.9422222222222222,
"grad_norm": 8.534485816955566,
"learning_rate": 1.815306541298023e-06,
"loss": 0.8757,
"step": 2120
},
{
"epoch": 0.9466666666666667,
"grad_norm": 10.03408145904541,
"learning_rate": 1.55515336027533e-06,
"loss": 0.8699,
"step": 2130
},
{
"epoch": 0.9511111111111111,
"grad_norm": 10.744848251342773,
"learning_rate": 1.3149654119351874e-06,
"loss": 0.9025,
"step": 2140
},
{
"epoch": 0.9555555555555556,
"grad_norm": 12.541043281555176,
"learning_rate": 1.0947914079008193e-06,
"loss": 0.9111,
"step": 2150
},
{
"epoch": 0.96,
"grad_norm": 9.694021224975586,
"learning_rate": 8.946760008419208e-07,
"loss": 0.8739,
"step": 2160
},
{
"epoch": 0.9644444444444444,
"grad_norm": 8.234989166259766,
"learning_rate": 7.146597754188578e-07,
"loss": 0.8864,
"step": 2170
},
{
"epoch": 0.9688888888888889,
"grad_norm": 8.109750747680664,
"learning_rate": 5.547792400518171e-07,
"loss": 0.8951,
"step": 2180
},
{
"epoch": 0.9733333333333334,
"grad_norm": 5.8855180740356445,
"learning_rate": 4.150668195166962e-07,
"loss": 0.9224,
"step": 2190
},
{
"epoch": 0.9777777777777777,
"grad_norm": 6.274494171142578,
"learning_rate": 2.955508483691416e-07,
"loss": 0.867,
"step": 2200
},
{
"epoch": 0.9822222222222222,
"grad_norm": 9.63640022277832,
"learning_rate": 1.9625556519811128e-07,
"loss": 0.8781,
"step": 2210
},
{
"epoch": 0.9866666666666667,
"grad_norm": 8.013216018676758,
"learning_rate": 1.1720110771015202e-07,
"loss": 0.9555,
"step": 2220
},
{
"epoch": 0.9911111111111112,
"grad_norm": 11.773499488830566,
"learning_rate": 5.840350864533273e-08,
"loss": 0.8768,
"step": 2230
},
{
"epoch": 0.9955555555555555,
"grad_norm": 6.226689338684082,
"learning_rate": 1.9874692525745365e-08,
"loss": 0.8743,
"step": 2240
},
{
"epoch": 1.0,
"grad_norm": 6.99469518661499,
"learning_rate": 1.6224732370728746e-09,
"loss": 0.8964,
"step": 2250
}
],
"logging_steps": 10,
"max_steps": 2250,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 300,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.137653654388736e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}