{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.978768577494693,
"eval_steps": 500,
"global_step": 2350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004246284501061571,
"grad_norm": 3.09375,
"learning_rate": 8.510638297872341e-07,
"loss": 1.9544,
"step": 1
},
{
"epoch": 0.021231422505307854,
"grad_norm": 4.03125,
"learning_rate": 4.255319148936171e-06,
"loss": 1.9708,
"step": 5
},
{
"epoch": 0.04246284501061571,
"grad_norm": 2.09375,
"learning_rate": 8.510638297872341e-06,
"loss": 1.9597,
"step": 10
},
{
"epoch": 0.06369426751592357,
"grad_norm": 8.25,
"learning_rate": 1.2765957446808511e-05,
"loss": 1.9264,
"step": 15
},
{
"epoch": 0.08492569002123142,
"grad_norm": 1.6953125,
"learning_rate": 1.7021276595744682e-05,
"loss": 1.8338,
"step": 20
},
{
"epoch": 0.10615711252653928,
"grad_norm": 2.453125,
"learning_rate": 2.1276595744680852e-05,
"loss": 1.7511,
"step": 25
},
{
"epoch": 0.12738853503184713,
"grad_norm": 2.609375,
"learning_rate": 2.5531914893617022e-05,
"loss": 1.6446,
"step": 30
},
{
"epoch": 0.14861995753715498,
"grad_norm": 2.4375,
"learning_rate": 2.9787234042553192e-05,
"loss": 1.5726,
"step": 35
},
{
"epoch": 0.16985138004246284,
"grad_norm": 6.21875,
"learning_rate": 3.4042553191489365e-05,
"loss": 1.4611,
"step": 40
},
{
"epoch": 0.1910828025477707,
"grad_norm": 5.03125,
"learning_rate": 3.829787234042553e-05,
"loss": 1.3599,
"step": 45
},
{
"epoch": 0.21231422505307856,
"grad_norm": 1.03125,
"learning_rate": 4.2553191489361704e-05,
"loss": 1.2565,
"step": 50
},
{
"epoch": 0.23354564755838642,
"grad_norm": 0.75,
"learning_rate": 4.680851063829788e-05,
"loss": 1.1707,
"step": 55
},
{
"epoch": 0.25477707006369427,
"grad_norm": 1.34375,
"learning_rate": 5.1063829787234044e-05,
"loss": 1.0829,
"step": 60
},
{
"epoch": 0.2760084925690021,
"grad_norm": 0.578125,
"learning_rate": 5.531914893617022e-05,
"loss": 0.9955,
"step": 65
},
{
"epoch": 0.29723991507430997,
"grad_norm": 0.3046875,
"learning_rate": 5.9574468085106384e-05,
"loss": 0.9635,
"step": 70
},
{
"epoch": 0.3184713375796178,
"grad_norm": 0.29296875,
"learning_rate": 6.382978723404256e-05,
"loss": 0.9258,
"step": 75
},
{
"epoch": 0.33970276008492567,
"grad_norm": 0.8515625,
"learning_rate": 6.808510638297873e-05,
"loss": 0.8948,
"step": 80
},
{
"epoch": 0.3609341825902335,
"grad_norm": 0.6484375,
"learning_rate": 7.23404255319149e-05,
"loss": 0.86,
"step": 85
},
{
"epoch": 0.3821656050955414,
"grad_norm": 0.77734375,
"learning_rate": 7.659574468085106e-05,
"loss": 0.8485,
"step": 90
},
{
"epoch": 0.4033970276008493,
"grad_norm": 0.44921875,
"learning_rate": 8.085106382978723e-05,
"loss": 0.8287,
"step": 95
},
{
"epoch": 0.42462845010615713,
"grad_norm": 0.2734375,
"learning_rate": 8.510638297872341e-05,
"loss": 0.8144,
"step": 100
},
{
"epoch": 0.445859872611465,
"grad_norm": 0.4453125,
"learning_rate": 8.936170212765958e-05,
"loss": 0.7988,
"step": 105
},
{
"epoch": 0.46709129511677283,
"grad_norm": 0.58203125,
"learning_rate": 9.361702127659576e-05,
"loss": 0.7956,
"step": 110
},
{
"epoch": 0.4883227176220807,
"grad_norm": 0.53125,
"learning_rate": 9.787234042553192e-05,
"loss": 0.7839,
"step": 115
},
{
"epoch": 0.5095541401273885,
"grad_norm": 0.51953125,
"learning_rate": 0.00010212765957446809,
"loss": 0.7604,
"step": 120
},
{
"epoch": 0.5307855626326964,
"grad_norm": 0.310546875,
"learning_rate": 0.00010638297872340425,
"loss": 0.7615,
"step": 125
},
{
"epoch": 0.5520169851380042,
"grad_norm": 1.421875,
"learning_rate": 0.00011063829787234043,
"loss": 0.7599,
"step": 130
},
{
"epoch": 0.5732484076433121,
"grad_norm": 1.203125,
"learning_rate": 0.00011489361702127661,
"loss": 0.7407,
"step": 135
},
{
"epoch": 0.5944798301486199,
"grad_norm": 1.1640625,
"learning_rate": 0.00011914893617021277,
"loss": 0.7518,
"step": 140
},
{
"epoch": 0.6157112526539278,
"grad_norm": 0.380859375,
"learning_rate": 0.00012340425531914893,
"loss": 0.7483,
"step": 145
},
{
"epoch": 0.6369426751592356,
"grad_norm": 0.546875,
"learning_rate": 0.00012765957446808513,
"loss": 0.7278,
"step": 150
},
{
"epoch": 0.6581740976645435,
"grad_norm": 1.0390625,
"learning_rate": 0.00013191489361702127,
"loss": 0.7319,
"step": 155
},
{
"epoch": 0.6794055201698513,
"grad_norm": 1.109375,
"learning_rate": 0.00013617021276595746,
"loss": 0.731,
"step": 160
},
{
"epoch": 0.7006369426751592,
"grad_norm": 0.578125,
"learning_rate": 0.00014042553191489363,
"loss": 0.7207,
"step": 165
},
{
"epoch": 0.721868365180467,
"grad_norm": 0.3359375,
"learning_rate": 0.0001446808510638298,
"loss": 0.7271,
"step": 170
},
{
"epoch": 0.7430997876857749,
"grad_norm": 0.462890625,
"learning_rate": 0.00014893617021276596,
"loss": 0.7111,
"step": 175
},
{
"epoch": 0.7643312101910829,
"grad_norm": 0.59765625,
"learning_rate": 0.00015319148936170213,
"loss": 0.7099,
"step": 180
},
{
"epoch": 0.7855626326963907,
"grad_norm": 0.5,
"learning_rate": 0.00015744680851063832,
"loss": 0.7114,
"step": 185
},
{
"epoch": 0.8067940552016986,
"grad_norm": 0.267578125,
"learning_rate": 0.00016170212765957446,
"loss": 0.7038,
"step": 190
},
{
"epoch": 0.8280254777070064,
"grad_norm": 0.498046875,
"learning_rate": 0.00016595744680851065,
"loss": 0.7097,
"step": 195
},
{
"epoch": 0.8492569002123143,
"grad_norm": 0.75390625,
"learning_rate": 0.00017021276595744682,
"loss": 0.7065,
"step": 200
},
{
"epoch": 0.8704883227176221,
"grad_norm": 0.267578125,
"learning_rate": 0.00017446808510638298,
"loss": 0.6975,
"step": 205
},
{
"epoch": 0.89171974522293,
"grad_norm": 0.74609375,
"learning_rate": 0.00017872340425531915,
"loss": 0.7038,
"step": 210
},
{
"epoch": 0.9129511677282378,
"grad_norm": 0.8203125,
"learning_rate": 0.00018297872340425532,
"loss": 0.6929,
"step": 215
},
{
"epoch": 0.9341825902335457,
"grad_norm": 0.265625,
"learning_rate": 0.0001872340425531915,
"loss": 0.7061,
"step": 220
},
{
"epoch": 0.9554140127388535,
"grad_norm": 0.765625,
"learning_rate": 0.00019148936170212768,
"loss": 0.7031,
"step": 225
},
{
"epoch": 0.9766454352441614,
"grad_norm": 0.66015625,
"learning_rate": 0.00019574468085106384,
"loss": 0.6968,
"step": 230
},
{
"epoch": 0.9978768577494692,
"grad_norm": 0.71875,
"learning_rate": 0.0002,
"loss": 0.6871,
"step": 235
},
{
"epoch": 0.9978768577494692,
"eval_loss": 1.4013111591339111,
"eval_runtime": 0.536,
"eval_samples_per_second": 9.328,
"eval_steps_per_second": 1.866,
"step": 235
},
{
"epoch": 1.019108280254777,
"grad_norm": 0.78125,
"learning_rate": 0.00019999724204599747,
"loss": 0.6759,
"step": 240
},
{
"epoch": 1.040339702760085,
"grad_norm": 0.54296875,
"learning_rate": 0.00019998896833611603,
"loss": 0.673,
"step": 245
},
{
"epoch": 1.0615711252653928,
"grad_norm": 0.294921875,
"learning_rate": 0.0001999751793267259,
"loss": 0.6671,
"step": 250
},
{
"epoch": 1.0828025477707006,
"grad_norm": 0.298828125,
"learning_rate": 0.0001999558757784162,
"loss": 0.6747,
"step": 255
},
{
"epoch": 1.1040339702760085,
"grad_norm": 0.53515625,
"learning_rate": 0.0001999310587559529,
"loss": 0.669,
"step": 260
},
{
"epoch": 1.1252653927813163,
"grad_norm": 0.298828125,
"learning_rate": 0.00019990072962822007,
"loss": 0.6718,
"step": 265
},
{
"epoch": 1.1464968152866242,
"grad_norm": 0.326171875,
"learning_rate": 0.00019986489006814452,
"loss": 0.6729,
"step": 270
},
{
"epoch": 1.167728237791932,
"grad_norm": 0.28125,
"learning_rate": 0.00019982354205260347,
"loss": 0.6658,
"step": 275
},
{
"epoch": 1.1889596602972399,
"grad_norm": 0.2431640625,
"learning_rate": 0.00019977668786231534,
"loss": 0.6694,
"step": 280
},
{
"epoch": 1.2101910828025477,
"grad_norm": 0.2890625,
"learning_rate": 0.00019972433008171416,
"loss": 0.6727,
"step": 285
},
{
"epoch": 1.2314225053078556,
"grad_norm": 0.306640625,
"learning_rate": 0.00019966647159880703,
"loss": 0.6608,
"step": 290
},
{
"epoch": 1.2526539278131634,
"grad_norm": 0.765625,
"learning_rate": 0.00019960311560501454,
"loss": 0.6662,
"step": 295
},
{
"epoch": 1.2738853503184713,
"grad_norm": 0.7421875,
"learning_rate": 0.0001995342655949951,
"loss": 0.6639,
"step": 300
},
{
"epoch": 1.2951167728237791,
"grad_norm": 0.78125,
"learning_rate": 0.00019945992536645187,
"loss": 0.6639,
"step": 305
},
{
"epoch": 1.316348195329087,
"grad_norm": 0.4140625,
"learning_rate": 0.0001993800990199235,
"loss": 0.6651,
"step": 310
},
{
"epoch": 1.3375796178343948,
"grad_norm": 0.421875,
"learning_rate": 0.0001992947909585578,
"loss": 0.6534,
"step": 315
},
{
"epoch": 1.3588110403397027,
"grad_norm": 0.216796875,
"learning_rate": 0.000199204005887869,
"loss": 0.6569,
"step": 320
},
{
"epoch": 1.3800424628450108,
"grad_norm": 0.29296875,
"learning_rate": 0.000199107748815478,
"loss": 0.6577,
"step": 325
},
{
"epoch": 1.4012738853503186,
"grad_norm": 0.21875,
"learning_rate": 0.00019900602505083648,
"loss": 0.6524,
"step": 330
},
{
"epoch": 1.4225053078556265,
"grad_norm": 0.23046875,
"learning_rate": 0.0001988988402049336,
"loss": 0.6531,
"step": 335
},
{
"epoch": 1.4437367303609343,
"grad_norm": 0.216796875,
"learning_rate": 0.00019878620018998696,
"loss": 0.6544,
"step": 340
},
{
"epoch": 1.4649681528662422,
"grad_norm": 0.25390625,
"learning_rate": 0.00019866811121911607,
"loss": 0.6462,
"step": 345
},
{
"epoch": 1.48619957537155,
"grad_norm": 0.3984375,
"learning_rate": 0.000198544579806,
"loss": 0.6544,
"step": 350
},
{
"epoch": 1.5074309978768579,
"grad_norm": 0.51171875,
"learning_rate": 0.0001984156127645178,
"loss": 0.6337,
"step": 355
},
{
"epoch": 1.5286624203821657,
"grad_norm": 0.412109375,
"learning_rate": 0.00019828121720837286,
"loss": 0.6543,
"step": 360
},
{
"epoch": 1.5498938428874736,
"grad_norm": 0.36328125,
"learning_rate": 0.00019814140055070042,
"loss": 0.647,
"step": 365
},
{
"epoch": 1.5711252653927814,
"grad_norm": 0.271484375,
"learning_rate": 0.0001979961705036587,
"loss": 0.6368,
"step": 370
},
{
"epoch": 1.5923566878980893,
"grad_norm": 0.33984375,
"learning_rate": 0.00019784553507800349,
"loss": 0.6504,
"step": 375
},
{
"epoch": 1.6135881104033971,
"grad_norm": 0.65234375,
"learning_rate": 0.00019768950258264623,
"loss": 0.6427,
"step": 380
},
{
"epoch": 1.634819532908705,
"grad_norm": 0.359375,
"learning_rate": 0.0001975280816241959,
"loss": 0.6411,
"step": 385
},
{
"epoch": 1.6560509554140128,
"grad_norm": 0.259765625,
"learning_rate": 0.00019736128110648407,
"loss": 0.6412,
"step": 390
},
{
"epoch": 1.6772823779193207,
"grad_norm": 0.23046875,
"learning_rate": 0.0001971891102300738,
"loss": 0.6497,
"step": 395
},
{
"epoch": 1.6985138004246285,
"grad_norm": 0.24609375,
"learning_rate": 0.00019701157849175228,
"loss": 0.6419,
"step": 400
},
{
"epoch": 1.7197452229299364,
"grad_norm": 0.2294921875,
"learning_rate": 0.00019682869568400684,
"loss": 0.6521,
"step": 405
},
{
"epoch": 1.7409766454352442,
"grad_norm": 0.2470703125,
"learning_rate": 0.00019664047189448493,
"loss": 0.641,
"step": 410
},
{
"epoch": 1.762208067940552,
"grad_norm": 0.431640625,
"learning_rate": 0.00019644691750543767,
"loss": 0.6522,
"step": 415
},
{
"epoch": 1.78343949044586,
"grad_norm": 0.46875,
"learning_rate": 0.00019624804319314705,
"loss": 0.6581,
"step": 420
},
{
"epoch": 1.8046709129511678,
"grad_norm": 0.2275390625,
"learning_rate": 0.00019604385992733715,
"loss": 0.6452,
"step": 425
},
{
"epoch": 1.8259023354564756,
"grad_norm": 0.546875,
"learning_rate": 0.00019583437897056915,
"loss": 0.6368,
"step": 430
},
{
"epoch": 1.8471337579617835,
"grad_norm": 0.447265625,
"learning_rate": 0.00019561961187761985,
"loss": 0.6457,
"step": 435
},
{
"epoch": 1.8683651804670913,
"grad_norm": 0.48046875,
"learning_rate": 0.00019539957049484458,
"loss": 0.6277,
"step": 440
},
{
"epoch": 1.8895966029723992,
"grad_norm": 0.205078125,
"learning_rate": 0.00019517426695952358,
"loss": 0.6305,
"step": 445
},
{
"epoch": 1.910828025477707,
"grad_norm": 0.66796875,
"learning_rate": 0.0001949437136991925,
"loss": 0.6329,
"step": 450
},
{
"epoch": 1.9320594479830149,
"grad_norm": 0.34765625,
"learning_rate": 0.00019470792343095718,
"loss": 0.6454,
"step": 455
},
{
"epoch": 1.9532908704883227,
"grad_norm": 0.55078125,
"learning_rate": 0.0001944669091607919,
"loss": 0.6292,
"step": 460
},
{
"epoch": 1.9745222929936306,
"grad_norm": 2.125,
"learning_rate": 0.00019422068418282202,
"loss": 0.6342,
"step": 465
},
{
"epoch": 1.9957537154989384,
"grad_norm": 3.875,
"learning_rate": 0.00019396926207859084,
"loss": 0.6707,
"step": 470
},
{
"epoch": 2.0,
"eval_loss": 1.399332880973816,
"eval_runtime": 0.4998,
"eval_samples_per_second": 10.003,
"eval_steps_per_second": 2.001,
"step": 471
},
{
"epoch": 2.0169851380042463,
"grad_norm": 0.33203125,
"learning_rate": 0.00019371265671631037,
"loss": 0.6274,
"step": 475
},
{
"epoch": 2.038216560509554,
"grad_norm": 0.4453125,
"learning_rate": 0.00019345088225009626,
"loss": 0.6217,
"step": 480
},
{
"epoch": 2.059447983014862,
"grad_norm": 0.2314453125,
"learning_rate": 0.0001931839531191873,
"loss": 0.6253,
"step": 485
},
{
"epoch": 2.08067940552017,
"grad_norm": 0.2421875,
"learning_rate": 0.00019291188404714878,
"loss": 0.619,
"step": 490
},
{
"epoch": 2.1019108280254777,
"grad_norm": 0.24609375,
"learning_rate": 0.0001926346900410604,
"loss": 0.6275,
"step": 495
},
{
"epoch": 2.1231422505307855,
"grad_norm": 0.212890625,
"learning_rate": 0.00019235238639068856,
"loss": 0.6175,
"step": 500
},
{
"epoch": 2.1443736730360934,
"grad_norm": 0.1953125,
"learning_rate": 0.00019206498866764288,
"loss": 0.6188,
"step": 505
},
{
"epoch": 2.1656050955414012,
"grad_norm": 0.224609375,
"learning_rate": 0.0001917725127245174,
"loss": 0.6104,
"step": 510
},
{
"epoch": 2.186836518046709,
"grad_norm": 0.265625,
"learning_rate": 0.0001914749746940161,
"loss": 0.6075,
"step": 515
},
{
"epoch": 2.208067940552017,
"grad_norm": 0.201171875,
"learning_rate": 0.00019117239098806295,
"loss": 0.6254,
"step": 520
},
{
"epoch": 2.229299363057325,
"grad_norm": 0.337890625,
"learning_rate": 0.00019086477829689685,
"loss": 0.617,
"step": 525
},
{
"epoch": 2.2505307855626326,
"grad_norm": 0.271484375,
"learning_rate": 0.0001905521535881509,
"loss": 0.6188,
"step": 530
},
{
"epoch": 2.2717622080679405,
"grad_norm": 0.22265625,
"learning_rate": 0.00019023453410591635,
"loss": 0.6167,
"step": 535
},
{
"epoch": 2.2929936305732483,
"grad_norm": 0.296875,
"learning_rate": 0.00018991193736979175,
"loss": 0.6048,
"step": 540
},
{
"epoch": 2.314225053078556,
"grad_norm": 0.185546875,
"learning_rate": 0.00018958438117391618,
"loss": 0.6092,
"step": 545
},
{
"epoch": 2.335456475583864,
"grad_norm": 0.349609375,
"learning_rate": 0.00018925188358598813,
"loss": 0.6079,
"step": 550
},
{
"epoch": 2.356687898089172,
"grad_norm": 0.306640625,
"learning_rate": 0.00018891446294626866,
"loss": 0.6136,
"step": 555
},
{
"epoch": 2.3779193205944797,
"grad_norm": 0.1884765625,
"learning_rate": 0.00018857213786656985,
"loss": 0.6075,
"step": 560
},
{
"epoch": 2.3991507430997876,
"grad_norm": 0.205078125,
"learning_rate": 0.0001882249272292282,
"loss": 0.6097,
"step": 565
},
{
"epoch": 2.4203821656050954,
"grad_norm": 0.208984375,
"learning_rate": 0.00018787285018606297,
"loss": 0.6178,
"step": 570
},
{
"epoch": 2.4416135881104033,
"grad_norm": 0.2353515625,
"learning_rate": 0.00018751592615732005,
"loss": 0.6099,
"step": 575
},
{
"epoch": 2.462845010615711,
"grad_norm": 0.30859375,
"learning_rate": 0.0001871541748306005,
"loss": 0.6161,
"step": 580
},
{
"epoch": 2.484076433121019,
"grad_norm": 0.208984375,
"learning_rate": 0.00018678761615977468,
"loss": 0.616,
"step": 585
},
{
"epoch": 2.505307855626327,
"grad_norm": 0.326171875,
"learning_rate": 0.00018641627036388169,
"loss": 0.611,
"step": 590
},
{
"epoch": 2.5265392781316347,
"grad_norm": 0.23828125,
"learning_rate": 0.00018604015792601396,
"loss": 0.6063,
"step": 595
},
{
"epoch": 2.5477707006369426,
"grad_norm": 0.390625,
"learning_rate": 0.00018565929959218758,
"loss": 0.5991,
"step": 600
},
{
"epoch": 2.5690021231422504,
"grad_norm": 0.255859375,
"learning_rate": 0.0001852737163701979,
"loss": 0.6085,
"step": 605
},
{
"epoch": 2.5902335456475583,
"grad_norm": 0.2294921875,
"learning_rate": 0.00018488342952846073,
"loss": 0.6027,
"step": 610
},
{
"epoch": 2.611464968152866,
"grad_norm": 0.2236328125,
"learning_rate": 0.0001844884605948392,
"loss": 0.6108,
"step": 615
},
{
"epoch": 2.632696390658174,
"grad_norm": 0.33984375,
"learning_rate": 0.00018408883135545632,
"loss": 0.6156,
"step": 620
},
{
"epoch": 2.653927813163482,
"grad_norm": 0.228515625,
"learning_rate": 0.00018368456385349334,
"loss": 0.6135,
"step": 625
},
{
"epoch": 2.6751592356687897,
"grad_norm": 0.2177734375,
"learning_rate": 0.0001832756803879737,
"loss": 0.6064,
"step": 630
},
{
"epoch": 2.6963906581740975,
"grad_norm": 0.2236328125,
"learning_rate": 0.0001828622035125332,
"loss": 0.6026,
"step": 635
},
{
"epoch": 2.7176220806794054,
"grad_norm": 0.2197265625,
"learning_rate": 0.00018244415603417603,
"loss": 0.614,
"step": 640
},
{
"epoch": 2.738853503184713,
"grad_norm": 0.26953125,
"learning_rate": 0.00018202156101201645,
"loss": 0.6176,
"step": 645
},
{
"epoch": 2.7600849256900215,
"grad_norm": 0.2021484375,
"learning_rate": 0.00018159444175600703,
"loss": 0.6057,
"step": 650
},
{
"epoch": 2.781316348195329,
"grad_norm": 0.2490234375,
"learning_rate": 0.00018116282182565311,
"loss": 0.6044,
"step": 655
},
{
"epoch": 2.802547770700637,
"grad_norm": 0.19921875,
"learning_rate": 0.00018072672502871296,
"loss": 0.5988,
"step": 660
},
{
"epoch": 2.8237791932059446,
"grad_norm": 0.20703125,
"learning_rate": 0.00018028617541988472,
"loss": 0.5974,
"step": 665
},
{
"epoch": 2.845010615711253,
"grad_norm": 0.279296875,
"learning_rate": 0.00017984119729947944,
"loss": 0.6061,
"step": 670
},
{
"epoch": 2.8662420382165603,
"grad_norm": 0.23046875,
"learning_rate": 0.000179391815212081,
"loss": 0.6102,
"step": 675
},
{
"epoch": 2.8874734607218686,
"grad_norm": 0.2314453125,
"learning_rate": 0.0001789380539451919,
"loss": 0.6052,
"step": 680
},
{
"epoch": 2.908704883227176,
"grad_norm": 0.34765625,
"learning_rate": 0.0001784799385278661,
"loss": 0.6062,
"step": 685
},
{
"epoch": 2.9299363057324843,
"grad_norm": 0.2197265625,
"learning_rate": 0.0001780174942293287,
"loss": 0.6058,
"step": 690
},
{
"epoch": 2.9511677282377917,
"grad_norm": 0.2109375,
"learning_rate": 0.00017755074655758174,
"loss": 0.6051,
"step": 695
},
{
"epoch": 2.9723991507431,
"grad_norm": 0.283203125,
"learning_rate": 0.00017707972125799735,
"loss": 0.6019,
"step": 700
},
{
"epoch": 2.9936305732484074,
"grad_norm": 0.2099609375,
"learning_rate": 0.0001766044443118978,
"loss": 0.6047,
"step": 705
},
{
"epoch": 2.9978768577494694,
"eval_loss": 1.4090731143951416,
"eval_runtime": 0.6234,
"eval_samples_per_second": 8.021,
"eval_steps_per_second": 1.604,
"step": 706
},
{
"epoch": 3.0148619957537157,
"grad_norm": 0.259765625,
"learning_rate": 0.0001761249419351222,
"loss": 0.5844,
"step": 710
},
{
"epoch": 3.0360934182590236,
"grad_norm": 0.1923828125,
"learning_rate": 0.00017564124057658056,
"loss": 0.5729,
"step": 715
},
{
"epoch": 3.0573248407643314,
"grad_norm": 0.197265625,
"learning_rate": 0.00017515336691679477,
"loss": 0.571,
"step": 720
},
{
"epoch": 3.0785562632696393,
"grad_norm": 0.2109375,
"learning_rate": 0.0001746613478664271,
"loss": 0.5809,
"step": 725
},
{
"epoch": 3.099787685774947,
"grad_norm": 0.1923828125,
"learning_rate": 0.00017416521056479577,
"loss": 0.5789,
"step": 730
},
{
"epoch": 3.121019108280255,
"grad_norm": 0.193359375,
"learning_rate": 0.0001736649823783779,
"loss": 0.5725,
"step": 735
},
{
"epoch": 3.142250530785563,
"grad_norm": 0.23046875,
"learning_rate": 0.00017316069089930007,
"loss": 0.5729,
"step": 740
},
{
"epoch": 3.1634819532908707,
"grad_norm": 0.251953125,
"learning_rate": 0.00017265236394381633,
"loss": 0.5839,
"step": 745
},
{
"epoch": 3.1847133757961785,
"grad_norm": 0.2158203125,
"learning_rate": 0.00017214002955077393,
"loss": 0.5877,
"step": 750
},
{
"epoch": 3.2059447983014864,
"grad_norm": 0.208984375,
"learning_rate": 0.00017162371598006666,
"loss": 0.5787,
"step": 755
},
{
"epoch": 3.2271762208067942,
"grad_norm": 0.31640625,
"learning_rate": 0.0001711034517110761,
"loss": 0.5841,
"step": 760
},
{
"epoch": 3.248407643312102,
"grad_norm": 0.224609375,
"learning_rate": 0.0001705792654411007,
"loss": 0.5784,
"step": 765
},
{
"epoch": 3.26963906581741,
"grad_norm": 0.265625,
"learning_rate": 0.00017005118608377288,
"loss": 0.5898,
"step": 770
},
{
"epoch": 3.290870488322718,
"grad_norm": 0.236328125,
"learning_rate": 0.00016951924276746425,
"loss": 0.5793,
"step": 775
},
{
"epoch": 3.3121019108280256,
"grad_norm": 0.2041015625,
"learning_rate": 0.00016898346483367867,
"loss": 0.5716,
"step": 780
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.2158203125,
"learning_rate": 0.00016844388183543418,
"loss": 0.5862,
"step": 785
},
{
"epoch": 3.3545647558386413,
"grad_norm": 0.4375,
"learning_rate": 0.00016790052353563253,
"loss": 0.5867,
"step": 790
},
{
"epoch": 3.375796178343949,
"grad_norm": 0.30078125,
"learning_rate": 0.00016735341990541764,
"loss": 0.5883,
"step": 795
},
{
"epoch": 3.397027600849257,
"grad_norm": 0.287109375,
"learning_rate": 0.0001668026011225225,
"loss": 0.5884,
"step": 800
},
{
"epoch": 3.418259023354565,
"grad_norm": 0.3046875,
"learning_rate": 0.00016624809756960444,
"loss": 0.5748,
"step": 805
},
{
"epoch": 3.4394904458598727,
"grad_norm": 0.2333984375,
"learning_rate": 0.0001656899398325693,
"loss": 0.5826,
"step": 810
},
{
"epoch": 3.4607218683651806,
"grad_norm": 0.26953125,
"learning_rate": 0.0001651281586988844,
"loss": 0.5771,
"step": 815
},
{
"epoch": 3.4819532908704884,
"grad_norm": 0.24609375,
"learning_rate": 0.00016456278515588024,
"loss": 0.5772,
"step": 820
},
{
"epoch": 3.5031847133757963,
"grad_norm": 0.2119140625,
"learning_rate": 0.00016399385038904138,
"loss": 0.5811,
"step": 825
},
{
"epoch": 3.524416135881104,
"grad_norm": 0.265625,
"learning_rate": 0.00016342138578028613,
"loss": 0.5806,
"step": 830
},
{
"epoch": 3.545647558386412,
"grad_norm": 0.2041015625,
"learning_rate": 0.00016284542290623567,
"loss": 0.5873,
"step": 835
},
{
"epoch": 3.56687898089172,
"grad_norm": 0.259765625,
"learning_rate": 0.00016226599353647228,
"loss": 0.5766,
"step": 840
},
{
"epoch": 3.5881104033970277,
"grad_norm": 0.2578125,
"learning_rate": 0.00016168312963178697,
"loss": 0.5819,
"step": 845
},
{
"epoch": 3.6093418259023355,
"grad_norm": 0.19921875,
"learning_rate": 0.00016109686334241655,
"loss": 0.5832,
"step": 850
},
{
"epoch": 3.6305732484076434,
"grad_norm": 0.2021484375,
"learning_rate": 0.00016050722700627012,
"loss": 0.5846,
"step": 855
},
{
"epoch": 3.6518046709129512,
"grad_norm": 0.2158203125,
"learning_rate": 0.0001599142531471456,
"loss": 0.5818,
"step": 860
},
{
"epoch": 3.673036093418259,
"grad_norm": 0.19921875,
"learning_rate": 0.00015931797447293552,
"loss": 0.5826,
"step": 865
},
{
"epoch": 3.694267515923567,
"grad_norm": 0.236328125,
"learning_rate": 0.00015871842387382305,
"loss": 0.5823,
"step": 870
},
{
"epoch": 3.715498938428875,
"grad_norm": 0.2021484375,
"learning_rate": 0.00015811563442046767,
"loss": 0.5813,
"step": 875
},
{
"epoch": 3.7367303609341826,
"grad_norm": 0.203125,
"learning_rate": 0.00015750963936218105,
"loss": 0.581,
"step": 880
},
{
"epoch": 3.7579617834394905,
"grad_norm": 0.19921875,
"learning_rate": 0.00015690047212509316,
"loss": 0.5763,
"step": 885
},
{
"epoch": 3.7791932059447984,
"grad_norm": 0.2216796875,
"learning_rate": 0.00015628816631030836,
"loss": 0.5776,
"step": 890
},
{
"epoch": 3.800424628450106,
"grad_norm": 0.2177734375,
"learning_rate": 0.00015567275569205218,
"loss": 0.5785,
"step": 895
},
{
"epoch": 3.821656050955414,
"grad_norm": 0.193359375,
"learning_rate": 0.00015505427421580808,
"loss": 0.581,
"step": 900
},
{
"epoch": 3.842887473460722,
"grad_norm": 0.3984375,
"learning_rate": 0.00015443275599644538,
"loss": 0.5765,
"step": 905
},
{
"epoch": 3.8641188959660298,
"grad_norm": 0.208984375,
"learning_rate": 0.00015380823531633729,
"loss": 0.5751,
"step": 910
},
{
"epoch": 3.8853503184713376,
"grad_norm": 0.248046875,
"learning_rate": 0.00015318074662346994,
"loss": 0.5759,
"step": 915
},
{
"epoch": 3.9065817409766455,
"grad_norm": 0.2001953125,
"learning_rate": 0.00015255032452954245,
"loss": 0.5856,
"step": 920
},
{
"epoch": 3.9278131634819533,
"grad_norm": 0.21875,
"learning_rate": 0.00015191700380805752,
"loss": 0.5848,
"step": 925
},
{
"epoch": 3.949044585987261,
"grad_norm": 0.20703125,
"learning_rate": 0.00015128081939240357,
"loss": 0.5871,
"step": 930
},
{
"epoch": 3.970276008492569,
"grad_norm": 0.220703125,
"learning_rate": 0.00015064180637392764,
"loss": 0.5803,
"step": 935
},
{
"epoch": 3.991507430997877,
"grad_norm": 0.220703125,
"learning_rate": 0.00015000000000000001,
"loss": 0.5773,
"step": 940
},
{
"epoch": 4.0,
"eval_loss": 1.442795753479004,
"eval_runtime": 0.4901,
"eval_samples_per_second": 10.203,
"eval_steps_per_second": 2.041,
"step": 942
},
{
"epoch": 4.012738853503185,
"grad_norm": 0.2255859375,
"learning_rate": 0.00014935543567206984,
"loss": 0.5596,
"step": 945
},
{
"epoch": 4.033970276008493,
"grad_norm": 0.287109375,
"learning_rate": 0.00014870814894371245,
"loss": 0.5489,
"step": 950
},
{
"epoch": 4.055201698513801,
"grad_norm": 0.240234375,
"learning_rate": 0.00014805817551866838,
"loss": 0.5555,
"step": 955
},
{
"epoch": 4.076433121019108,
"grad_norm": 0.267578125,
"learning_rate": 0.00014740555124887375,
"loss": 0.5524,
"step": 960
},
{
"epoch": 4.097664543524417,
"grad_norm": 0.26171875,
"learning_rate": 0.00014675031213248296,
"loss": 0.5603,
"step": 965
},
{
"epoch": 4.118895966029724,
"grad_norm": 0.2041015625,
"learning_rate": 0.00014609249431188278,
"loss": 0.5587,
"step": 970
},
{
"epoch": 4.140127388535032,
"grad_norm": 0.2578125,
"learning_rate": 0.0001454321340716992,
"loss": 0.5552,
"step": 975
},
{
"epoch": 4.16135881104034,
"grad_norm": 0.25390625,
"learning_rate": 0.00014476926783679538,
"loss": 0.5542,
"step": 980
},
{
"epoch": 4.182590233545648,
"grad_norm": 0.28125,
"learning_rate": 0.00014410393217026318,
"loss": 0.5554,
"step": 985
},
{
"epoch": 4.203821656050955,
"grad_norm": 0.255859375,
"learning_rate": 0.00014343616377140582,
"loss": 0.5477,
"step": 990
},
{
"epoch": 4.225053078556264,
"grad_norm": 0.34375,
"learning_rate": 0.00014276599947371388,
"loss": 0.5477,
"step": 995
},
{
"epoch": 4.246284501061571,
"grad_norm": 0.234375,
"learning_rate": 0.0001420934762428335,
"loss": 0.5563,
"step": 1000
},
{
"epoch": 4.267515923566879,
"grad_norm": 0.2001953125,
"learning_rate": 0.00014141863117452745,
"loss": 0.5552,
"step": 1005
},
{
"epoch": 4.288747346072187,
"grad_norm": 0.259765625,
"learning_rate": 0.0001407415014926288,
"loss": 0.553,
"step": 1010
},
{
"epoch": 4.309978768577495,
"grad_norm": 0.2119140625,
"learning_rate": 0.00014006212454698797,
"loss": 0.5575,
"step": 1015
},
{
"epoch": 4.3312101910828025,
"grad_norm": 0.228515625,
"learning_rate": 0.00013938053781141222,
"loss": 0.5604,
"step": 1020
},
{
"epoch": 4.352441613588111,
"grad_norm": 0.234375,
"learning_rate": 0.00013869677888159887,
"loss": 0.5556,
"step": 1025
},
{
"epoch": 4.373673036093418,
"grad_norm": 0.25390625,
"learning_rate": 0.00013801088547306148,
"loss": 0.5557,
"step": 1030
},
{
"epoch": 4.3949044585987265,
"grad_norm": 0.224609375,
"learning_rate": 0.00013732289541904948,
"loss": 0.5595,
"step": 1035
},
{
"epoch": 4.416135881104034,
"grad_norm": 0.2109375,
"learning_rate": 0.00013663284666846134,
"loss": 0.5605,
"step": 1040
},
{
"epoch": 4.437367303609342,
"grad_norm": 0.205078125,
"learning_rate": 0.00013594077728375128,
"loss": 0.5598,
"step": 1045
},
{
"epoch": 4.45859872611465,
"grad_norm": 0.23046875,
"learning_rate": 0.00013524672543882996,
"loss": 0.559,
"step": 1050
},
{
"epoch": 4.479830148619958,
"grad_norm": 0.2080078125,
"learning_rate": 0.00013455072941695863,
"loss": 0.5612,
"step": 1055
},
{
"epoch": 4.501061571125265,
"grad_norm": 0.21484375,
"learning_rate": 0.00013385282760863758,
"loss": 0.556,
"step": 1060
},
{
"epoch": 4.522292993630574,
"grad_norm": 0.2275390625,
"learning_rate": 0.00013315305850948846,
"loss": 0.5551,
"step": 1065
},
{
"epoch": 4.543524416135881,
"grad_norm": 0.20703125,
"learning_rate": 0.00013245146071813114,
"loss": 0.5602,
"step": 1070
},
{
"epoch": 4.564755838641189,
"grad_norm": 0.208984375,
"learning_rate": 0.00013174807293405428,
"loss": 0.5467,
"step": 1075
},
{
"epoch": 4.585987261146497,
"grad_norm": 0.2177734375,
"learning_rate": 0.00013104293395548098,
"loss": 0.5551,
"step": 1080
},
{
"epoch": 4.607218683651805,
"grad_norm": 0.21484375,
"learning_rate": 0.00013033608267722858,
"loss": 0.5533,
"step": 1085
},
{
"epoch": 4.628450106157112,
"grad_norm": 0.208984375,
"learning_rate": 0.00012962755808856342,
"loss": 0.5531,
"step": 1090
},
{
"epoch": 4.649681528662421,
"grad_norm": 0.22265625,
"learning_rate": 0.0001289173992710499,
"loss": 0.5585,
"step": 1095
},
{
"epoch": 4.670912951167728,
"grad_norm": 0.25,
"learning_rate": 0.00012820564539639512,
"loss": 0.5601,
"step": 1100
},
{
"epoch": 4.692144373673036,
"grad_norm": 0.2236328125,
"learning_rate": 0.00012749233572428804,
"loss": 0.5586,
"step": 1105
},
{
"epoch": 4.713375796178344,
"grad_norm": 0.2099609375,
"learning_rate": 0.00012677750960023396,
"loss": 0.5584,
"step": 1110
},
{
"epoch": 4.734607218683652,
"grad_norm": 0.259765625,
"learning_rate": 0.0001260612064533843,
"loss": 0.5655,
"step": 1115
},
{
"epoch": 4.7558386411889595,
"grad_norm": 0.220703125,
"learning_rate": 0.0001253434657943616,
"loss": 0.5516,
"step": 1120
},
{
"epoch": 4.777070063694268,
"grad_norm": 0.19921875,
"learning_rate": 0.0001246243272130804,
"loss": 0.552,
"step": 1125
},
{
"epoch": 4.798301486199575,
"grad_norm": 0.205078125,
"learning_rate": 0.00012390383037656327,
"loss": 0.5549,
"step": 1130
},
{
"epoch": 4.8195329087048835,
"grad_norm": 0.228515625,
"learning_rate": 0.00012318201502675285,
"loss": 0.5564,
"step": 1135
},
{
"epoch": 4.840764331210191,
"grad_norm": 0.21875,
"learning_rate": 0.00012245892097831982,
"loss": 0.5617,
"step": 1140
},
{
"epoch": 4.861995753715499,
"grad_norm": 0.2734375,
"learning_rate": 0.0001217345881164667,
"loss": 0.564,
"step": 1145
},
{
"epoch": 4.883227176220807,
"grad_norm": 0.296875,
"learning_rate": 0.00012100905639472779,
"loss": 0.5534,
"step": 1150
},
{
"epoch": 4.904458598726115,
"grad_norm": 0.296875,
"learning_rate": 0.00012028236583276542,
"loss": 0.5556,
"step": 1155
},
{
"epoch": 4.925690021231422,
"grad_norm": 0.224609375,
"learning_rate": 0.00011955455651416246,
"loss": 0.5656,
"step": 1160
},
{
"epoch": 4.946921443736731,
"grad_norm": 0.21875,
"learning_rate": 0.00011882566858421135,
"loss": 0.5595,
"step": 1165
},
{
"epoch": 4.968152866242038,
"grad_norm": 0.2373046875,
"learning_rate": 0.00011809574224769981,
"loss": 0.56,
"step": 1170
},
{
"epoch": 4.989384288747346,
"grad_norm": 0.2265625,
"learning_rate": 0.00011736481776669306,
"loss": 0.5548,
"step": 1175
},
{
"epoch": 4.997876857749469,
"eval_loss": 1.4904053211212158,
"eval_runtime": 0.6021,
"eval_samples_per_second": 8.304,
"eval_steps_per_second": 1.661,
"step": 1177
},
{
"epoch": 5.010615711252654,
"grad_norm": 0.21484375,
"learning_rate": 0.00011663293545831302,
"loss": 0.5538,
"step": 1180
},
{
"epoch": 5.031847133757962,
"grad_norm": 0.255859375,
"learning_rate": 0.00011590013569251457,
"loss": 0.5333,
"step": 1185
},
{
"epoch": 5.053078556263269,
"grad_norm": 0.2265625,
"learning_rate": 0.0001151664588898586,
"loss": 0.5372,
"step": 1190
},
{
"epoch": 5.074309978768578,
"grad_norm": 0.27734375,
"learning_rate": 0.00011443194551928266,
"loss": 0.5291,
"step": 1195
},
{
"epoch": 5.095541401273885,
"grad_norm": 0.2197265625,
"learning_rate": 0.00011369663609586854,
"loss": 0.5301,
"step": 1200
},
{
"epoch": 5.116772823779193,
"grad_norm": 0.21484375,
"learning_rate": 0.00011296057117860759,
"loss": 0.5246,
"step": 1205
},
{
"epoch": 5.138004246284501,
"grad_norm": 0.267578125,
"learning_rate": 0.00011222379136816345,
"loss": 0.5341,
"step": 1210
},
{
"epoch": 5.159235668789809,
"grad_norm": 0.2578125,
"learning_rate": 0.00011148633730463273,
"loss": 0.5391,
"step": 1215
},
{
"epoch": 5.1804670912951165,
"grad_norm": 0.220703125,
"learning_rate": 0.00011074824966530312,
"loss": 0.5354,
"step": 1220
},
{
"epoch": 5.201698513800425,
"grad_norm": 0.2177734375,
"learning_rate": 0.00011000956916240985,
"loss": 0.5315,
"step": 1225
},
{
"epoch": 5.222929936305732,
"grad_norm": 0.2216796875,
"learning_rate": 0.00010927033654088983,
"loss": 0.5396,
"step": 1230
},
{
"epoch": 5.2441613588110405,
"grad_norm": 0.26953125,
"learning_rate": 0.00010853059257613448,
"loss": 0.5375,
"step": 1235
},
{
"epoch": 5.265392781316348,
"grad_norm": 0.28125,
"learning_rate": 0.00010779037807174033,
"loss": 0.5332,
"step": 1240
},
{
"epoch": 5.286624203821656,
"grad_norm": 0.244140625,
"learning_rate": 0.00010704973385725851,
"loss": 0.5324,
"step": 1245
},
{
"epoch": 5.307855626326964,
"grad_norm": 0.2333984375,
"learning_rate": 0.00010630870078594249,
"loss": 0.5344,
"step": 1250
},
{
"epoch": 5.329087048832272,
"grad_norm": 0.373046875,
"learning_rate": 0.00010556731973249485,
"loss": 0.5272,
"step": 1255
},
{
"epoch": 5.350318471337579,
"grad_norm": 0.287109375,
"learning_rate": 0.00010482563159081238,
"loss": 0.5331,
"step": 1260
},
{
"epoch": 5.371549893842888,
"grad_norm": 0.24609375,
"learning_rate": 0.00010408367727173067,
"loss": 0.5327,
"step": 1265
},
{
"epoch": 5.392781316348195,
"grad_norm": 0.23046875,
"learning_rate": 0.00010334149770076747,
"loss": 0.5319,
"step": 1270
},
{
"epoch": 5.414012738853503,
"grad_norm": 0.240234375,
"learning_rate": 0.0001025991338158651,
"loss": 0.5439,
"step": 1275
},
{
"epoch": 5.435244161358811,
"grad_norm": 0.2353515625,
"learning_rate": 0.00010185662656513251,
"loss": 0.5419,
"step": 1280
},
{
"epoch": 5.456475583864119,
"grad_norm": 0.224609375,
"learning_rate": 0.00010111401690458654,
"loss": 0.5375,
"step": 1285
},
{
"epoch": 5.477707006369426,
"grad_norm": 0.2421875,
"learning_rate": 0.00010037134579589302,
"loss": 0.5351,
"step": 1290
},
{
"epoch": 5.498938428874735,
"grad_norm": 0.26953125,
"learning_rate": 9.962865420410701e-05,
"loss": 0.53,
"step": 1295
},
{
"epoch": 5.520169851380042,
"grad_norm": 0.291015625,
"learning_rate": 9.888598309541347e-05,
"loss": 0.5318,
"step": 1300
},
{
"epoch": 5.54140127388535,
"grad_norm": 0.2314453125,
"learning_rate": 9.814337343486754e-05,
"loss": 0.5348,
"step": 1305
},
{
"epoch": 5.562632696390658,
"grad_norm": 0.33203125,
"learning_rate": 9.740086618413495e-05,
"loss": 0.5301,
"step": 1310
},
{
"epoch": 5.583864118895966,
"grad_norm": 0.2578125,
"learning_rate": 9.665850229923258e-05,
"loss": 0.5333,
"step": 1315
},
{
"epoch": 5.6050955414012735,
"grad_norm": 0.2373046875,
"learning_rate": 9.591632272826934e-05,
"loss": 0.5344,
"step": 1320
},
{
"epoch": 5.626326963906582,
"grad_norm": 0.2138671875,
"learning_rate": 9.517436840918766e-05,
"loss": 0.529,
"step": 1325
},
{
"epoch": 5.647558386411889,
"grad_norm": 0.2216796875,
"learning_rate": 9.44326802675052e-05,
"loss": 0.5346,
"step": 1330
},
{
"epoch": 5.6687898089171975,
"grad_norm": 0.279296875,
"learning_rate": 9.369129921405754e-05,
"loss": 0.5354,
"step": 1335
},
{
"epoch": 5.690021231422505,
"grad_norm": 0.2431640625,
"learning_rate": 9.295026614274152e-05,
"loss": 0.5303,
"step": 1340
},
{
"epoch": 5.711252653927813,
"grad_norm": 0.2578125,
"learning_rate": 9.220962192825968e-05,
"loss": 0.5366,
"step": 1345
},
{
"epoch": 5.732484076433121,
"grad_norm": 0.248046875,
"learning_rate": 9.146940742386553e-05,
"loss": 0.542,
"step": 1350
},
{
"epoch": 5.753715498938429,
"grad_norm": 0.21875,
"learning_rate": 9.072966345911019e-05,
"loss": 0.5402,
"step": 1355
},
{
"epoch": 5.774946921443737,
"grad_norm": 0.2158203125,
"learning_rate": 8.999043083759017e-05,
"loss": 0.5336,
"step": 1360
},
{
"epoch": 5.796178343949045,
"grad_norm": 0.25,
"learning_rate": 8.925175033469688e-05,
"loss": 0.5389,
"step": 1365
},
{
"epoch": 5.817409766454352,
"grad_norm": 0.2119140625,
"learning_rate": 8.851366269536729e-05,
"loss": 0.5355,
"step": 1370
},
{
"epoch": 5.83864118895966,
"grad_norm": 0.251953125,
"learning_rate": 8.777620863183657e-05,
"loss": 0.5234,
"step": 1375
},
{
"epoch": 5.859872611464969,
"grad_norm": 0.255859375,
"learning_rate": 8.703942882139245e-05,
"loss": 0.5318,
"step": 1380
},
{
"epoch": 5.881104033970276,
"grad_norm": 0.2294921875,
"learning_rate": 8.630336390413147e-05,
"loss": 0.5371,
"step": 1385
},
{
"epoch": 5.902335456475583,
"grad_norm": 0.22265625,
"learning_rate": 8.556805448071735e-05,
"loss": 0.5317,
"step": 1390
},
{
"epoch": 5.923566878980892,
"grad_norm": 0.2255859375,
"learning_rate": 8.483354111014141e-05,
"loss": 0.5331,
"step": 1395
},
{
"epoch": 5.9447983014862,
"grad_norm": 0.220703125,
"learning_rate": 8.409986430748545e-05,
"loss": 0.5373,
"step": 1400
},
{
"epoch": 5.966029723991507,
"grad_norm": 0.240234375,
"learning_rate": 8.336706454168701e-05,
"loss": 0.5328,
"step": 1405
},
{
"epoch": 5.987261146496815,
"grad_norm": 0.2578125,
"learning_rate": 8.263518223330697e-05,
"loss": 0.5409,
"step": 1410
},
{
"epoch": 6.0,
"eval_loss": 1.5479737520217896,
"eval_runtime": 0.4964,
"eval_samples_per_second": 10.072,
"eval_steps_per_second": 2.014,
"step": 1413
},
{
"epoch": 6.008492569002123,
"grad_norm": 0.220703125,
"learning_rate": 8.190425775230021e-05,
"loss": 0.5317,
"step": 1415
},
{
"epoch": 6.029723991507431,
"grad_norm": 0.228515625,
"learning_rate": 8.117433141578866e-05,
"loss": 0.5111,
"step": 1420
},
{
"epoch": 6.050955414012739,
"grad_norm": 0.2275390625,
"learning_rate": 8.044544348583755e-05,
"loss": 0.5147,
"step": 1425
},
{
"epoch": 6.072186836518047,
"grad_norm": 0.2197265625,
"learning_rate": 7.971763416723459e-05,
"loss": 0.5099,
"step": 1430
},
{
"epoch": 6.0934182590233545,
"grad_norm": 0.251953125,
"learning_rate": 7.89909436052722e-05,
"loss": 0.511,
"step": 1435
},
{
"epoch": 6.114649681528663,
"grad_norm": 0.23046875,
"learning_rate": 7.826541188353329e-05,
"loss": 0.5043,
"step": 1440
},
{
"epoch": 6.13588110403397,
"grad_norm": 0.232421875,
"learning_rate": 7.754107902168019e-05,
"loss": 0.5112,
"step": 1445
},
{
"epoch": 6.1571125265392785,
"grad_norm": 0.26953125,
"learning_rate": 7.681798497324716e-05,
"loss": 0.5119,
"step": 1450
},
{
"epoch": 6.178343949044586,
"grad_norm": 0.234375,
"learning_rate": 7.609616962343675e-05,
"loss": 0.5177,
"step": 1455
},
{
"epoch": 6.199575371549894,
"grad_norm": 0.232421875,
"learning_rate": 7.537567278691964e-05,
"loss": 0.5208,
"step": 1460
},
{
"epoch": 6.220806794055202,
"grad_norm": 0.23046875,
"learning_rate": 7.465653420563845e-05,
"loss": 0.5126,
"step": 1465
},
{
"epoch": 6.24203821656051,
"grad_norm": 0.2412109375,
"learning_rate": 7.393879354661577e-05,
"loss": 0.5185,
"step": 1470
},
{
"epoch": 6.263269639065817,
"grad_norm": 0.26171875,
"learning_rate": 7.322249039976608e-05,
"loss": 0.5171,
"step": 1475
},
{
"epoch": 6.284501061571126,
"grad_norm": 0.24609375,
"learning_rate": 7.2507664275712e-05,
"loss": 0.5096,
"step": 1480
},
{
"epoch": 6.305732484076433,
"grad_norm": 0.2578125,
"learning_rate": 7.179435460360491e-05,
"loss": 0.5116,
"step": 1485
},
{
"epoch": 6.326963906581741,
"grad_norm": 0.224609375,
"learning_rate": 7.108260072895013e-05,
"loss": 0.5104,
"step": 1490
},
{
"epoch": 6.348195329087049,
"grad_norm": 0.2353515625,
"learning_rate": 7.037244191143661e-05,
"loss": 0.5165,
"step": 1495
},
{
"epoch": 6.369426751592357,
"grad_norm": 0.275390625,
"learning_rate": 6.966391732277143e-05,
"loss": 0.5186,
"step": 1500
},
{
"epoch": 6.3906581740976645,
"grad_norm": 0.2294921875,
"learning_rate": 6.895706604451905e-05,
"loss": 0.5124,
"step": 1505
},
{
"epoch": 6.411889596602973,
"grad_norm": 0.2431640625,
"learning_rate": 6.825192706594575e-05,
"loss": 0.5181,
"step": 1510
},
{
"epoch": 6.43312101910828,
"grad_norm": 0.2353515625,
"learning_rate": 6.75485392818689e-05,
"loss": 0.5156,
"step": 1515
},
{
"epoch": 6.4543524416135885,
"grad_norm": 0.26953125,
"learning_rate": 6.684694149051156e-05,
"loss": 0.5164,
"step": 1520
},
{
"epoch": 6.475583864118896,
"grad_norm": 0.23828125,
"learning_rate": 6.614717239136246e-05,
"loss": 0.5055,
"step": 1525
},
{
"epoch": 6.496815286624204,
"grad_norm": 0.2431640625,
"learning_rate": 6.54492705830414e-05,
"loss": 0.5179,
"step": 1530
},
{
"epoch": 6.518046709129512,
"grad_norm": 0.2412109375,
"learning_rate": 6.475327456117005e-05,
"loss": 0.5144,
"step": 1535
},
{
"epoch": 6.53927813163482,
"grad_norm": 0.251953125,
"learning_rate": 6.405922271624874e-05,
"loss": 0.5117,
"step": 1540
},
{
"epoch": 6.560509554140127,
"grad_norm": 0.2490234375,
"learning_rate": 6.336715333153869e-05,
"loss": 0.5143,
"step": 1545
},
{
"epoch": 6.581740976645436,
"grad_norm": 0.25,
"learning_rate": 6.267710458095053e-05,
"loss": 0.5136,
"step": 1550
},
{
"epoch": 6.602972399150743,
"grad_norm": 0.23046875,
"learning_rate": 6.198911452693853e-05,
"loss": 0.5133,
"step": 1555
},
{
"epoch": 6.624203821656051,
"grad_norm": 0.2294921875,
"learning_rate": 6.130322111840114e-05,
"loss": 0.5162,
"step": 1560
},
{
"epoch": 6.645435244161359,
"grad_norm": 0.2431640625,
"learning_rate": 6.0619462188587793e-05,
"loss": 0.5171,
"step": 1565
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.228515625,
"learning_rate": 5.993787545301204e-05,
"loss": 0.519,
"step": 1570
},
{
"epoch": 6.687898089171974,
"grad_norm": 0.232421875,
"learning_rate": 5.9258498507371194e-05,
"loss": 0.5143,
"step": 1575
},
{
"epoch": 6.709129511677283,
"grad_norm": 0.251953125,
"learning_rate": 5.8581368825472585e-05,
"loss": 0.5238,
"step": 1580
},
{
"epoch": 6.73036093418259,
"grad_norm": 0.2578125,
"learning_rate": 5.790652375716652e-05,
"loss": 0.513,
"step": 1585
},
{
"epoch": 6.751592356687898,
"grad_norm": 0.234375,
"learning_rate": 5.7234000526286156e-05,
"loss": 0.5129,
"step": 1590
},
{
"epoch": 6.772823779193206,
"grad_norm": 0.25,
"learning_rate": 5.656383622859418e-05,
"loss": 0.5251,
"step": 1595
},
{
"epoch": 6.794055201698514,
"grad_norm": 0.224609375,
"learning_rate": 5.589606782973683e-05,
"loss": 0.5158,
"step": 1600
},
{
"epoch": 6.8152866242038215,
"grad_norm": 0.2392578125,
"learning_rate": 5.5230732163204615e-05,
"loss": 0.5169,
"step": 1605
},
{
"epoch": 6.83651804670913,
"grad_norm": 0.275390625,
"learning_rate": 5.456786592830083e-05,
"loss": 0.5203,
"step": 1610
},
{
"epoch": 6.857749469214437,
"grad_norm": 0.2275390625,
"learning_rate": 5.39075056881172e-05,
"loss": 0.5127,
"step": 1615
},
{
"epoch": 6.8789808917197455,
"grad_norm": 0.2333984375,
"learning_rate": 5.3249687867517095e-05,
"loss": 0.5176,
"step": 1620
},
{
"epoch": 6.900212314225053,
"grad_norm": 0.26171875,
"learning_rate": 5.259444875112624e-05,
"loss": 0.5163,
"step": 1625
},
{
"epoch": 6.921443736730361,
"grad_norm": 0.296875,
"learning_rate": 5.1941824481331626e-05,
"loss": 0.5256,
"step": 1630
},
{
"epoch": 6.942675159235669,
"grad_norm": 0.234375,
"learning_rate": 5.129185105628756e-05,
"loss": 0.5199,
"step": 1635
},
{
"epoch": 6.963906581740977,
"grad_norm": 0.275390625,
"learning_rate": 5.064456432793019e-05,
"loss": 0.5166,
"step": 1640
},
{
"epoch": 6.985138004246284,
"grad_norm": 0.240234375,
"learning_rate": 5.000000000000002e-05,
"loss": 0.5151,
"step": 1645
},
{
"epoch": 6.997876857749469,
"eval_loss": 1.6102020740509033,
"eval_runtime": 0.6273,
"eval_samples_per_second": 7.971,
"eval_steps_per_second": 1.594,
"step": 1648
},
{
"epoch": 7.006369426751593,
"grad_norm": 0.2275390625,
"learning_rate": 4.93581936260724e-05,
"loss": 0.504,
"step": 1650
},
{
"epoch": 7.0276008492569,
"grad_norm": 0.2412109375,
"learning_rate": 4.8719180607596484e-05,
"loss": 0.4887,
"step": 1655
},
{
"epoch": 7.048832271762208,
"grad_norm": 0.2451171875,
"learning_rate": 4.808299619194251e-05,
"loss": 0.502,
"step": 1660
},
{
"epoch": 7.070063694267516,
"grad_norm": 0.2333984375,
"learning_rate": 4.744967547045754e-05,
"loss": 0.496,
"step": 1665
},
{
"epoch": 7.091295116772824,
"grad_norm": 0.2275390625,
"learning_rate": 4.681925337653006e-05,
"loss": 0.4983,
"step": 1670
},
{
"epoch": 7.112526539278131,
"grad_norm": 0.2392578125,
"learning_rate": 4.6191764683662744e-05,
"loss": 0.4982,
"step": 1675
},
{
"epoch": 7.13375796178344,
"grad_norm": 0.248046875,
"learning_rate": 4.5567244003554645e-05,
"loss": 0.5054,
"step": 1680
},
{
"epoch": 7.154989384288747,
"grad_norm": 0.240234375,
"learning_rate": 4.494572578419194e-05,
"loss": 0.5019,
"step": 1685
},
{
"epoch": 7.176220806794055,
"grad_norm": 0.2431640625,
"learning_rate": 4.432724430794786e-05,
"loss": 0.4981,
"step": 1690
},
{
"epoch": 7.197452229299363,
"grad_norm": 0.26171875,
"learning_rate": 4.371183368969165e-05,
"loss": 0.5,
"step": 1695
},
{
"epoch": 7.218683651804671,
"grad_norm": 0.251953125,
"learning_rate": 4.309952787490689e-05,
"loss": 0.501,
"step": 1700
},
{
"epoch": 7.2399150743099785,
"grad_norm": 0.25390625,
"learning_rate": 4.249036063781896e-05,
"loss": 0.5,
"step": 1705
},
{
"epoch": 7.261146496815287,
"grad_norm": 0.2421875,
"learning_rate": 4.1884365579532346e-05,
"loss": 0.4991,
"step": 1710
},
{
"epoch": 7.282377919320594,
"grad_norm": 0.255859375,
"learning_rate": 4.128157612617696e-05,
"loss": 0.4962,
"step": 1715
},
{
"epoch": 7.3036093418259025,
"grad_norm": 0.2392578125,
"learning_rate": 4.0682025527064486e-05,
"loss": 0.5008,
"step": 1720
},
{
"epoch": 7.32484076433121,
"grad_norm": 0.236328125,
"learning_rate": 4.008574685285442e-05,
"loss": 0.5016,
"step": 1725
},
{
"epoch": 7.346072186836518,
"grad_norm": 0.23828125,
"learning_rate": 3.94927729937299e-05,
"loss": 0.4939,
"step": 1730
},
{
"epoch": 7.367303609341826,
"grad_norm": 0.248046875,
"learning_rate": 3.890313665758348e-05,
"loss": 0.5046,
"step": 1735
},
{
"epoch": 7.388535031847134,
"grad_norm": 0.2373046875,
"learning_rate": 3.8316870368213e-05,
"loss": 0.4882,
"step": 1740
},
{
"epoch": 7.409766454352441,
"grad_norm": 0.2470703125,
"learning_rate": 3.773400646352769e-05,
"loss": 0.5053,
"step": 1745
},
{
"epoch": 7.43099787685775,
"grad_norm": 0.2412109375,
"learning_rate": 3.7154577093764334e-05,
"loss": 0.5065,
"step": 1750
},
{
"epoch": 7.452229299363057,
"grad_norm": 0.2392578125,
"learning_rate": 3.657861421971388e-05,
"loss": 0.5005,
"step": 1755
},
{
"epoch": 7.473460721868365,
"grad_norm": 0.263671875,
"learning_rate": 3.6006149610958625e-05,
"loss": 0.5066,
"step": 1760
},
{
"epoch": 7.494692144373673,
"grad_norm": 0.2451171875,
"learning_rate": 3.543721484411976e-05,
"loss": 0.5019,
"step": 1765
},
{
"epoch": 7.515923566878981,
"grad_norm": 0.287109375,
"learning_rate": 3.487184130111562e-05,
"loss": 0.493,
"step": 1770
},
{
"epoch": 7.537154989384288,
"grad_norm": 0.251953125,
"learning_rate": 3.4310060167430725e-05,
"loss": 0.5087,
"step": 1775
},
{
"epoch": 7.558386411889597,
"grad_norm": 0.26171875,
"learning_rate": 3.375190243039556e-05,
"loss": 0.5032,
"step": 1780
},
{
"epoch": 7.579617834394904,
"grad_norm": 0.2431640625,
"learning_rate": 3.319739887747752e-05,
"loss": 0.5004,
"step": 1785
},
{
"epoch": 7.600849256900212,
"grad_norm": 0.2392578125,
"learning_rate": 3.264658009458239e-05,
"loss": 0.5014,
"step": 1790
},
{
"epoch": 7.62208067940552,
"grad_norm": 0.2578125,
"learning_rate": 3.209947646436752e-05,
"loss": 0.5044,
"step": 1795
},
{
"epoch": 7.643312101910828,
"grad_norm": 0.2392578125,
"learning_rate": 3.155611816456586e-05,
"loss": 0.5027,
"step": 1800
},
{
"epoch": 7.6645435244161355,
"grad_norm": 0.2431640625,
"learning_rate": 3.1016535166321356e-05,
"loss": 0.5004,
"step": 1805
},
{
"epoch": 7.685774946921444,
"grad_norm": 0.2490234375,
"learning_rate": 3.0480757232535772e-05,
"loss": 0.5016,
"step": 1810
},
{
"epoch": 7.707006369426751,
"grad_norm": 0.2490234375,
"learning_rate": 2.9948813916227115e-05,
"loss": 0.5026,
"step": 1815
},
{
"epoch": 7.7282377919320595,
"grad_norm": 0.2470703125,
"learning_rate": 2.9420734558899322e-05,
"loss": 0.5047,
"step": 1820
},
{
"epoch": 7.749469214437367,
"grad_norm": 0.2333984375,
"learning_rate": 2.889654828892393e-05,
"loss": 0.5015,
"step": 1825
},
{
"epoch": 7.770700636942675,
"grad_norm": 0.2490234375,
"learning_rate": 2.8376284019933373e-05,
"loss": 0.505,
"step": 1830
},
{
"epoch": 7.7919320594479835,
"grad_norm": 0.25,
"learning_rate": 2.7859970449226104e-05,
"loss": 0.5035,
"step": 1835
},
{
"epoch": 7.813163481953291,
"grad_norm": 0.248046875,
"learning_rate": 2.73476360561837e-05,
"loss": 0.5021,
"step": 1840
},
{
"epoch": 7.834394904458598,
"grad_norm": 0.240234375,
"learning_rate": 2.6839309100699973e-05,
"loss": 0.4994,
"step": 1845
},
{
"epoch": 7.855626326963907,
"grad_norm": 0.24609375,
"learning_rate": 2.6335017621622116e-05,
"loss": 0.5017,
"step": 1850
},
{
"epoch": 7.876857749469215,
"grad_norm": 0.23046875,
"learning_rate": 2.5834789435204243e-05,
"loss": 0.4948,
"step": 1855
},
{
"epoch": 7.898089171974522,
"grad_norm": 0.23828125,
"learning_rate": 2.5338652133572915e-05,
"loss": 0.4898,
"step": 1860
},
{
"epoch": 7.91932059447983,
"grad_norm": 0.2412109375,
"learning_rate": 2.4846633083205263e-05,
"loss": 0.5079,
"step": 1865
},
{
"epoch": 7.940552016985138,
"grad_norm": 0.279296875,
"learning_rate": 2.4358759423419474e-05,
"loss": 0.4995,
"step": 1870
},
{
"epoch": 7.961783439490446,
"grad_norm": 0.2431640625,
"learning_rate": 2.3875058064877807e-05,
"loss": 0.4999,
"step": 1875
},
{
"epoch": 7.983014861995754,
"grad_norm": 0.23828125,
"learning_rate": 2.339555568810221e-05,
"loss": 0.4987,
"step": 1880
},
{
"epoch": 8.0,
"eval_loss": 1.6578315496444702,
"eval_runtime": 0.4956,
"eval_samples_per_second": 10.088,
"eval_steps_per_second": 2.018,
"step": 1884
},
{
"epoch": 8.004246284501061,
"grad_norm": 0.2578125,
"learning_rate": 2.2920278742002676e-05,
"loss": 0.4963,
"step": 1885
},
{
"epoch": 8.02547770700637,
"grad_norm": 0.2412109375,
"learning_rate": 2.244925344241828e-05,
"loss": 0.4973,
"step": 1890
},
{
"epoch": 8.046709129511678,
"grad_norm": 0.234375,
"learning_rate": 2.1982505770671303e-05,
"loss": 0.491,
"step": 1895
},
{
"epoch": 8.067940552016985,
"grad_norm": 0.26171875,
"learning_rate": 2.1520061472133902e-05,
"loss": 0.4888,
"step": 1900
},
{
"epoch": 8.089171974522293,
"grad_norm": 0.2392578125,
"learning_rate": 2.1061946054808146e-05,
"loss": 0.4977,
"step": 1905
},
{
"epoch": 8.110403397027602,
"grad_norm": 0.2412109375,
"learning_rate": 2.0608184787919026e-05,
"loss": 0.4917,
"step": 1910
},
{
"epoch": 8.13163481953291,
"grad_norm": 0.2578125,
"learning_rate": 2.0158802700520574e-05,
"loss": 0.4969,
"step": 1915
},
{
"epoch": 8.152866242038217,
"grad_norm": 0.2421875,
"learning_rate": 1.9713824580115335e-05,
"loss": 0.4918,
"step": 1920
},
{
"epoch": 8.174097664543524,
"grad_norm": 0.2421875,
"learning_rate": 1.927327497128706e-05,
"loss": 0.4919,
"step": 1925
},
{
"epoch": 8.195329087048833,
"grad_norm": 0.2421875,
"learning_rate": 1.883717817434688e-05,
"loss": 0.4926,
"step": 1930
},
{
"epoch": 8.21656050955414,
"grad_norm": 0.2431640625,
"learning_rate": 1.840555824399296e-05,
"loss": 0.4913,
"step": 1935
},
{
"epoch": 8.237791932059448,
"grad_norm": 0.255859375,
"learning_rate": 1.797843898798358e-05,
"loss": 0.4879,
"step": 1940
},
{
"epoch": 8.259023354564755,
"grad_norm": 0.2421875,
"learning_rate": 1.7555843965823992e-05,
"loss": 0.5012,
"step": 1945
},
{
"epoch": 8.280254777070065,
"grad_norm": 0.2412109375,
"learning_rate": 1.7137796487466797e-05,
"loss": 0.4956,
"step": 1950
},
{
"epoch": 8.301486199575372,
"grad_norm": 0.25,
"learning_rate": 1.672431961202635e-05,
"loss": 0.4959,
"step": 1955
},
{
"epoch": 8.32271762208068,
"grad_norm": 0.2451171875,
"learning_rate": 1.6315436146506703e-05,
"loss": 0.4888,
"step": 1960
},
{
"epoch": 8.343949044585987,
"grad_norm": 0.2421875,
"learning_rate": 1.5911168644543707e-05,
"loss": 0.4824,
"step": 1965
},
{
"epoch": 8.365180467091296,
"grad_norm": 0.2431640625,
"learning_rate": 1.5511539405160825e-05,
"loss": 0.501,
"step": 1970
},
{
"epoch": 8.386411889596603,
"grad_norm": 0.2490234375,
"learning_rate": 1.5116570471539293e-05,
"loss": 0.4885,
"step": 1975
},
{
"epoch": 8.40764331210191,
"grad_norm": 0.24609375,
"learning_rate": 1.4726283629802107e-05,
"loss": 0.4921,
"step": 1980
},
{
"epoch": 8.428874734607218,
"grad_norm": 0.259765625,
"learning_rate": 1.4340700407812435e-05,
"loss": 0.4962,
"step": 1985
},
{
"epoch": 8.450106157112527,
"grad_norm": 0.236328125,
"learning_rate": 1.3959842073986085e-05,
"loss": 0.4892,
"step": 1990
},
{
"epoch": 8.471337579617835,
"grad_norm": 0.2412109375,
"learning_rate": 1.3583729636118358e-05,
"loss": 0.497,
"step": 1995
},
{
"epoch": 8.492569002123142,
"grad_norm": 0.251953125,
"learning_rate": 1.3212383840225329e-05,
"loss": 0.4964,
"step": 2000
},
{
"epoch": 8.51380042462845,
"grad_norm": 0.2392578125,
"learning_rate": 1.2845825169399507e-05,
"loss": 0.4892,
"step": 2005
},
{
"epoch": 8.535031847133759,
"grad_norm": 0.2578125,
"learning_rate": 1.2484073842679944e-05,
"loss": 0.4951,
"step": 2010
},
{
"epoch": 8.556263269639066,
"grad_norm": 0.2392578125,
"learning_rate": 1.2127149813937022e-05,
"loss": 0.4949,
"step": 2015
},
{
"epoch": 8.577494692144374,
"grad_norm": 0.240234375,
"learning_rate": 1.1775072770771834e-05,
"loss": 0.4906,
"step": 2020
},
{
"epoch": 8.598726114649681,
"grad_norm": 0.2373046875,
"learning_rate": 1.1427862133430156e-05,
"loss": 0.4836,
"step": 2025
},
{
"epoch": 8.61995753715499,
"grad_norm": 0.2412109375,
"learning_rate": 1.1085537053731354e-05,
"loss": 0.4944,
"step": 2030
},
{
"epoch": 8.641188959660298,
"grad_norm": 0.2373046875,
"learning_rate": 1.0748116414011888e-05,
"loss": 0.4908,
"step": 2035
},
{
"epoch": 8.662420382165605,
"grad_norm": 0.2412109375,
"learning_rate": 1.0415618826083828e-05,
"loss": 0.4969,
"step": 2040
},
{
"epoch": 8.683651804670912,
"grad_norm": 0.251953125,
"learning_rate": 1.0088062630208273e-05,
"loss": 0.4904,
"step": 2045
},
{
"epoch": 8.704883227176222,
"grad_norm": 0.26171875,
"learning_rate": 9.765465894083636e-06,
"loss": 0.4991,
"step": 2050
},
{
"epoch": 8.726114649681529,
"grad_norm": 0.2470703125,
"learning_rate": 9.447846411849115e-06,
"loss": 0.4971,
"step": 2055
},
{
"epoch": 8.747346072186836,
"grad_norm": 0.25,
"learning_rate": 9.135221703103136e-06,
"loss": 0.4914,
"step": 2060
},
{
"epoch": 8.768577494692144,
"grad_norm": 0.2451171875,
"learning_rate": 8.827609011937066e-06,
"loss": 0.4919,
"step": 2065
},
{
"epoch": 8.789808917197453,
"grad_norm": 0.259765625,
"learning_rate": 8.525025305983936e-06,
"loss": 0.4997,
"step": 2070
},
{
"epoch": 8.81104033970276,
"grad_norm": 0.2412109375,
"learning_rate": 8.227487275482592e-06,
"loss": 0.4879,
"step": 2075
},
{
"epoch": 8.832271762208068,
"grad_norm": 0.23828125,
"learning_rate": 7.935011332357112e-06,
"loss": 0.4957,
"step": 2080
},
{
"epoch": 8.853503184713375,
"grad_norm": 0.244140625,
"learning_rate": 7.647613609311455e-06,
"loss": 0.492,
"step": 2085
},
{
"epoch": 8.874734607218684,
"grad_norm": 0.2353515625,
"learning_rate": 7.365309958939615e-06,
"loss": 0.4913,
"step": 2090
},
{
"epoch": 8.895966029723992,
"grad_norm": 0.2431640625,
"learning_rate": 7.088115952851238e-06,
"loss": 0.4872,
"step": 2095
},
{
"epoch": 8.9171974522293,
"grad_norm": 0.2490234375,
"learning_rate": 6.81604688081271e-06,
"loss": 0.49,
"step": 2100
},
{
"epoch": 8.938428874734607,
"grad_norm": 0.267578125,
"learning_rate": 6.549117749903755e-06,
"loss": 0.4884,
"step": 2105
},
{
"epoch": 8.959660297239916,
"grad_norm": 0.248046875,
"learning_rate": 6.287343283689661e-06,
"loss": 0.4897,
"step": 2110
},
{
"epoch": 8.980891719745223,
"grad_norm": 0.234375,
"learning_rate": 6.030737921409169e-06,
"loss": 0.4875,
"step": 2115
},
{
"epoch": 8.99787685774947,
"eval_loss": 1.6812984943389893,
"eval_runtime": 0.4908,
"eval_samples_per_second": 10.188,
"eval_steps_per_second": 2.038,
"step": 2119
},
{
"epoch": 9.00212314225053,
"grad_norm": 0.2373046875,
"learning_rate": 5.779315817178e-06,
"loss": 0.4972,
"step": 2120
},
{
"epoch": 9.023354564755838,
"grad_norm": 0.2392578125,
"learning_rate": 5.533090839208133e-06,
"loss": 0.4875,
"step": 2125
},
{
"epoch": 9.044585987261147,
"grad_norm": 0.244140625,
"learning_rate": 5.292076569042826e-06,
"loss": 0.4966,
"step": 2130
},
{
"epoch": 9.065817409766455,
"grad_norm": 0.2431640625,
"learning_rate": 5.056286300807511e-06,
"loss": 0.4931,
"step": 2135
},
{
"epoch": 9.087048832271762,
"grad_norm": 0.2353515625,
"learning_rate": 4.825733040476465e-06,
"loss": 0.4863,
"step": 2140
},
{
"epoch": 9.10828025477707,
"grad_norm": 0.2353515625,
"learning_rate": 4.600429505155424e-06,
"loss": 0.4945,
"step": 2145
},
{
"epoch": 9.129511677282379,
"grad_norm": 0.26171875,
"learning_rate": 4.380388122380141e-06,
"loss": 0.4945,
"step": 2150
},
{
"epoch": 9.150743099787686,
"grad_norm": 0.240234375,
"learning_rate": 4.165621029430855e-06,
"loss": 0.4925,
"step": 2155
},
{
"epoch": 9.171974522292993,
"grad_norm": 0.2421875,
"learning_rate": 3.9561400726628505e-06,
"loss": 0.4971,
"step": 2160
},
{
"epoch": 9.1932059447983,
"grad_norm": 0.240234375,
"learning_rate": 3.7519568068529855e-06,
"loss": 0.4901,
"step": 2165
},
{
"epoch": 9.21443736730361,
"grad_norm": 0.2412109375,
"learning_rate": 3.5530824945623542e-06,
"loss": 0.4841,
"step": 2170
},
{
"epoch": 9.235668789808917,
"grad_norm": 0.236328125,
"learning_rate": 3.359528105515064e-06,
"loss": 0.4937,
"step": 2175
},
{
"epoch": 9.256900212314225,
"grad_norm": 0.2470703125,
"learning_rate": 3.1713043159931734e-06,
"loss": 0.4952,
"step": 2180
},
{
"epoch": 9.278131634819532,
"grad_norm": 0.240234375,
"learning_rate": 2.9884215082477408e-06,
"loss": 0.487,
"step": 2185
},
{
"epoch": 9.299363057324841,
"grad_norm": 0.2353515625,
"learning_rate": 2.810889769926217e-06,
"loss": 0.489,
"step": 2190
},
{
"epoch": 9.320594479830149,
"grad_norm": 0.24609375,
"learning_rate": 2.6387188935159456e-06,
"loss": 0.4974,
"step": 2195
},
{
"epoch": 9.341825902335456,
"grad_norm": 0.2431640625,
"learning_rate": 2.471918375804105e-06,
"loss": 0.4921,
"step": 2200
},
{
"epoch": 9.363057324840764,
"grad_norm": 0.2470703125,
"learning_rate": 2.3104974173537743e-06,
"loss": 0.4915,
"step": 2205
},
{
"epoch": 9.384288747346073,
"grad_norm": 0.232421875,
"learning_rate": 2.1544649219965575e-06,
"loss": 0.4812,
"step": 2210
},
{
"epoch": 9.40552016985138,
"grad_norm": 0.2421875,
"learning_rate": 2.003829496341325e-06,
"loss": 0.4857,
"step": 2215
},
{
"epoch": 9.426751592356688,
"grad_norm": 0.24609375,
"learning_rate": 1.8585994492995916e-06,
"loss": 0.4962,
"step": 2220
},
{
"epoch": 9.447983014861995,
"grad_norm": 0.244140625,
"learning_rate": 1.7187827916271382e-06,
"loss": 0.4935,
"step": 2225
},
{
"epoch": 9.469214437367304,
"grad_norm": 0.2412109375,
"learning_rate": 1.5843872354822097e-06,
"loss": 0.4898,
"step": 2230
},
{
"epoch": 9.490445859872612,
"grad_norm": 0.2421875,
"learning_rate": 1.4554201940000123e-06,
"loss": 0.489,
"step": 2235
},
{
"epoch": 9.511677282377919,
"grad_norm": 0.2412109375,
"learning_rate": 1.3318887808839274e-06,
"loss": 0.4964,
"step": 2240
},
{
"epoch": 9.532908704883226,
"grad_norm": 0.2412109375,
"learning_rate": 1.21379981001305e-06,
"loss": 0.4868,
"step": 2245
},
{
"epoch": 9.554140127388536,
"grad_norm": 0.236328125,
"learning_rate": 1.1011597950663865e-06,
"loss": 0.495,
"step": 2250
},
{
"epoch": 9.575371549893843,
"grad_norm": 0.23828125,
"learning_rate": 9.939749491635341e-07,
"loss": 0.4956,
"step": 2255
},
{
"epoch": 9.59660297239915,
"grad_norm": 0.24609375,
"learning_rate": 8.922511845219971e-07,
"loss": 0.4862,
"step": 2260
},
{
"epoch": 9.617834394904458,
"grad_norm": 0.2373046875,
"learning_rate": 7.959941121310266e-07,
"loss": 0.4904,
"step": 2265
},
{
"epoch": 9.639065817409767,
"grad_norm": 0.2470703125,
"learning_rate": 7.052090414422119e-07,
"loss": 0.4892,
"step": 2270
},
{
"epoch": 9.660297239915074,
"grad_norm": 0.2451171875,
"learning_rate": 6.199009800765265e-07,
"loss": 0.5003,
"step": 2275
},
{
"epoch": 9.681528662420382,
"grad_norm": 0.2412109375,
"learning_rate": 5.400746335481488e-07,
"loss": 0.485,
"step": 2280
},
{
"epoch": 9.70276008492569,
"grad_norm": 0.248046875,
"learning_rate": 4.6573440500492504e-07,
"loss": 0.4933,
"step": 2285
},
{
"epoch": 9.723991507430998,
"grad_norm": 0.2421875,
"learning_rate": 3.96884394985475e-07,
"loss": 0.4817,
"step": 2290
},
{
"epoch": 9.745222929936306,
"grad_norm": 0.25,
"learning_rate": 3.335284011929951e-07,
"loss": 0.4828,
"step": 2295
},
{
"epoch": 9.766454352441613,
"grad_norm": 0.2578125,
"learning_rate": 2.756699182858369e-07,
"loss": 0.4922,
"step": 2300
},
{
"epoch": 9.787685774946922,
"grad_norm": 0.2470703125,
"learning_rate": 2.2331213768468363e-07,
"loss": 0.493,
"step": 2305
},
{
"epoch": 9.80891719745223,
"grad_norm": 0.251953125,
"learning_rate": 1.7645794739654665e-07,
"loss": 0.4972,
"step": 2310
},
{
"epoch": 9.830148619957537,
"grad_norm": 0.2431640625,
"learning_rate": 1.351099318554705e-07,
"loss": 0.4881,
"step": 2315
},
{
"epoch": 9.851380042462845,
"grad_norm": 0.251953125,
"learning_rate": 9.927037177993592e-08,
"loss": 0.49,
"step": 2320
},
{
"epoch": 9.872611464968152,
"grad_norm": 0.240234375,
"learning_rate": 6.894124404711599e-08,
"loss": 0.4973,
"step": 2325
},
{
"epoch": 9.893842887473461,
"grad_norm": 0.236328125,
"learning_rate": 4.4124221583785595e-08,
"loss": 0.4918,
"step": 2330
},
{
"epoch": 9.915074309978769,
"grad_norm": 0.244140625,
"learning_rate": 2.482067327409521e-08,
"loss": 0.4923,
"step": 2335
},
{
"epoch": 9.936305732484076,
"grad_norm": 0.2392578125,
"learning_rate": 1.103166388398691e-08,
"loss": 0.4791,
"step": 2340
},
{
"epoch": 9.957537154989385,
"grad_norm": 0.2470703125,
"learning_rate": 2.7579540025524097e-09,
"loss": 0.4883,
"step": 2345
},
{
"epoch": 9.978768577494693,
"grad_norm": 0.2451171875,
"learning_rate": 0.0,
"loss": 0.4904,
"step": 2350
},
{
"epoch": 9.978768577494693,
"eval_loss": 1.6824607849121094,
"eval_runtime": 0.4916,
"eval_samples_per_second": 10.17,
"eval_steps_per_second": 2.034,
"step": 2350
},
{
"epoch": 9.978768577494693,
"step": 2350,
"total_flos": 1.8676199416922112e+18,
"train_loss": 0.5910337928000917,
"train_runtime": 14730.3274,
"train_samples_per_second": 10.216,
"train_steps_per_second": 0.16
}
],
"logging_steps": 5,
"max_steps": 2350,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.8676199416922112e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}