{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.0,
"eval_steps": 500,
"global_step": 3504,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0228310502283105,
"grad_norm": 2.228510856628418,
"learning_rate": 4.987157534246575e-05,
"loss": 6.6129,
"step": 10
},
{
"epoch": 0.045662100456621,
"grad_norm": 1.832844614982605,
"learning_rate": 4.9728881278538815e-05,
"loss": 3.1197,
"step": 20
},
{
"epoch": 0.0684931506849315,
"grad_norm": 1.3700411319732666,
"learning_rate": 4.958618721461187e-05,
"loss": 2.1387,
"step": 30
},
{
"epoch": 0.091324200913242,
"grad_norm": 1.3453809022903442,
"learning_rate": 4.9443493150684935e-05,
"loss": 1.4287,
"step": 40
},
{
"epoch": 0.1141552511415525,
"grad_norm": 1.195481777191162,
"learning_rate": 4.930079908675799e-05,
"loss": 0.9585,
"step": 50
},
{
"epoch": 0.136986301369863,
"grad_norm": 1.0389988422393799,
"learning_rate": 4.9158105022831055e-05,
"loss": 0.5942,
"step": 60
},
{
"epoch": 0.1598173515981735,
"grad_norm": 0.9327354431152344,
"learning_rate": 4.901541095890411e-05,
"loss": 0.4014,
"step": 70
},
{
"epoch": 0.182648401826484,
"grad_norm": 0.791429340839386,
"learning_rate": 4.8872716894977175e-05,
"loss": 0.2836,
"step": 80
},
{
"epoch": 0.2054794520547945,
"grad_norm": 0.7389950156211853,
"learning_rate": 4.873002283105023e-05,
"loss": 0.1995,
"step": 90
},
{
"epoch": 0.228310502283105,
"grad_norm": 0.6029524207115173,
"learning_rate": 4.8587328767123295e-05,
"loss": 0.1431,
"step": 100
},
{
"epoch": 0.2511415525114155,
"grad_norm": 0.5200534462928772,
"learning_rate": 4.844463470319635e-05,
"loss": 0.1019,
"step": 110
},
{
"epoch": 0.273972602739726,
"grad_norm": 0.6404463648796082,
"learning_rate": 4.830194063926941e-05,
"loss": 0.0846,
"step": 120
},
{
"epoch": 0.2968036529680365,
"grad_norm": 0.4442310631275177,
"learning_rate": 4.815924657534247e-05,
"loss": 0.0654,
"step": 130
},
{
"epoch": 0.319634703196347,
"grad_norm": 0.4031509757041931,
"learning_rate": 4.801655251141553e-05,
"loss": 0.0542,
"step": 140
},
{
"epoch": 0.3424657534246575,
"grad_norm": 0.4141863286495209,
"learning_rate": 4.7873858447488584e-05,
"loss": 0.0508,
"step": 150
},
{
"epoch": 0.365296803652968,
"grad_norm": 0.3440322279930115,
"learning_rate": 4.773116438356164e-05,
"loss": 0.0391,
"step": 160
},
{
"epoch": 0.3881278538812785,
"grad_norm": 0.6025522947311401,
"learning_rate": 4.7588470319634704e-05,
"loss": 0.0381,
"step": 170
},
{
"epoch": 0.410958904109589,
"grad_norm": 0.34279781579971313,
"learning_rate": 4.744577625570776e-05,
"loss": 0.0339,
"step": 180
},
{
"epoch": 0.4337899543378995,
"grad_norm": 0.3098609149456024,
"learning_rate": 4.7303082191780824e-05,
"loss": 0.0301,
"step": 190
},
{
"epoch": 0.45662100456621,
"grad_norm": 0.29201066493988037,
"learning_rate": 4.716038812785388e-05,
"loss": 0.0287,
"step": 200
},
{
"epoch": 0.4794520547945205,
"grad_norm": 0.2325911968946457,
"learning_rate": 4.7017694063926944e-05,
"loss": 0.0258,
"step": 210
},
{
"epoch": 0.502283105022831,
"grad_norm": 0.2296101152896881,
"learning_rate": 4.6875e-05,
"loss": 0.0227,
"step": 220
},
{
"epoch": 0.5251141552511416,
"grad_norm": 0.24569889903068542,
"learning_rate": 4.6732305936073064e-05,
"loss": 0.0232,
"step": 230
},
{
"epoch": 0.547945205479452,
"grad_norm": 0.22321540117263794,
"learning_rate": 4.658961187214612e-05,
"loss": 0.018,
"step": 240
},
{
"epoch": 0.5707762557077626,
"grad_norm": 0.26079800724983215,
"learning_rate": 4.6446917808219184e-05,
"loss": 0.0192,
"step": 250
},
{
"epoch": 0.593607305936073,
"grad_norm": 0.29277801513671875,
"learning_rate": 4.630422374429224e-05,
"loss": 0.02,
"step": 260
},
{
"epoch": 0.6164383561643836,
"grad_norm": 0.2620185315608978,
"learning_rate": 4.61615296803653e-05,
"loss": 0.0172,
"step": 270
},
{
"epoch": 0.639269406392694,
"grad_norm": 0.32734254002571106,
"learning_rate": 4.601883561643836e-05,
"loss": 0.0191,
"step": 280
},
{
"epoch": 0.6621004566210046,
"grad_norm": 0.34528958797454834,
"learning_rate": 4.587614155251142e-05,
"loss": 0.0151,
"step": 290
},
{
"epoch": 0.684931506849315,
"grad_norm": 0.16524860262870789,
"learning_rate": 4.5733447488584474e-05,
"loss": 0.0154,
"step": 300
},
{
"epoch": 0.7077625570776256,
"grad_norm": 0.17619894444942474,
"learning_rate": 4.559075342465753e-05,
"loss": 0.0141,
"step": 310
},
{
"epoch": 0.730593607305936,
"grad_norm": 0.18591953814029694,
"learning_rate": 4.5448059360730594e-05,
"loss": 0.0128,
"step": 320
},
{
"epoch": 0.7534246575342466,
"grad_norm": 0.17972640693187714,
"learning_rate": 4.530536529680365e-05,
"loss": 0.0119,
"step": 330
},
{
"epoch": 0.776255707762557,
"grad_norm": 0.2308385968208313,
"learning_rate": 4.5162671232876714e-05,
"loss": 0.0114,
"step": 340
},
{
"epoch": 0.7990867579908676,
"grad_norm": 0.13175222277641296,
"learning_rate": 4.501997716894977e-05,
"loss": 0.0107,
"step": 350
},
{
"epoch": 0.821917808219178,
"grad_norm": 0.1399480700492859,
"learning_rate": 4.4877283105022834e-05,
"loss": 0.0097,
"step": 360
},
{
"epoch": 0.8447488584474886,
"grad_norm": 0.12605524063110352,
"learning_rate": 4.473458904109589e-05,
"loss": 0.0112,
"step": 370
},
{
"epoch": 0.867579908675799,
"grad_norm": 0.16242380440235138,
"learning_rate": 4.4591894977168954e-05,
"loss": 0.0099,
"step": 380
},
{
"epoch": 0.8904109589041096,
"grad_norm": 0.12138999998569489,
"learning_rate": 4.444920091324201e-05,
"loss": 0.0089,
"step": 390
},
{
"epoch": 0.91324200913242,
"grad_norm": 0.22154438495635986,
"learning_rate": 4.4306506849315074e-05,
"loss": 0.009,
"step": 400
},
{
"epoch": 0.9360730593607306,
"grad_norm": 0.09284752607345581,
"learning_rate": 4.416381278538813e-05,
"loss": 0.0079,
"step": 410
},
{
"epoch": 0.958904109589041,
"grad_norm": 0.2589144706726074,
"learning_rate": 4.4021118721461194e-05,
"loss": 0.0077,
"step": 420
},
{
"epoch": 0.9817351598173516,
"grad_norm": 0.18040631711483002,
"learning_rate": 4.387842465753425e-05,
"loss": 0.0073,
"step": 430
},
{
"epoch": 1.0,
"eval_loss": 0.0013460684567689896,
"eval_runtime": 71.4572,
"eval_samples_per_second": 41.983,
"eval_steps_per_second": 2.631,
"step": 438
},
{
"epoch": 1.004566210045662,
"grad_norm": 0.16953226923942566,
"learning_rate": 4.373573059360731e-05,
"loss": 0.0071,
"step": 440
},
{
"epoch": 1.0273972602739727,
"grad_norm": 0.13055773079395294,
"learning_rate": 4.359303652968037e-05,
"loss": 0.0076,
"step": 450
},
{
"epoch": 1.0502283105022832,
"grad_norm": 0.11131970584392548,
"learning_rate": 4.345034246575343e-05,
"loss": 0.0072,
"step": 460
},
{
"epoch": 1.0730593607305936,
"grad_norm": 0.1048307716846466,
"learning_rate": 4.3307648401826484e-05,
"loss": 0.0053,
"step": 470
},
{
"epoch": 1.095890410958904,
"grad_norm": 0.191425159573555,
"learning_rate": 4.316495433789954e-05,
"loss": 0.0061,
"step": 480
},
{
"epoch": 1.1187214611872145,
"grad_norm": 0.1289547234773636,
"learning_rate": 4.3022260273972604e-05,
"loss": 0.0057,
"step": 490
},
{
"epoch": 1.1415525114155252,
"grad_norm": 0.09019248932600021,
"learning_rate": 4.287956621004566e-05,
"loss": 0.0049,
"step": 500
},
{
"epoch": 1.1643835616438356,
"grad_norm": 0.22927595674991608,
"learning_rate": 4.2736872146118724e-05,
"loss": 0.0051,
"step": 510
},
{
"epoch": 1.187214611872146,
"grad_norm": 0.1198509931564331,
"learning_rate": 4.259417808219178e-05,
"loss": 0.0047,
"step": 520
},
{
"epoch": 1.2100456621004567,
"grad_norm": 0.1271675080060959,
"learning_rate": 4.2451484018264844e-05,
"loss": 0.0051,
"step": 530
},
{
"epoch": 1.2328767123287672,
"grad_norm": 0.13385379314422607,
"learning_rate": 4.23087899543379e-05,
"loss": 0.0054,
"step": 540
},
{
"epoch": 1.2557077625570776,
"grad_norm": 0.06279306858778,
"learning_rate": 4.2166095890410964e-05,
"loss": 0.0046,
"step": 550
},
{
"epoch": 1.278538812785388,
"grad_norm": 0.12206210196018219,
"learning_rate": 4.202340182648402e-05,
"loss": 0.0046,
"step": 560
},
{
"epoch": 1.3013698630136985,
"grad_norm": 0.08528764545917511,
"learning_rate": 4.1880707762557084e-05,
"loss": 0.0049,
"step": 570
},
{
"epoch": 1.3242009132420092,
"grad_norm": 0.11532297730445862,
"learning_rate": 4.173801369863014e-05,
"loss": 0.0044,
"step": 580
},
{
"epoch": 1.3470319634703196,
"grad_norm": 0.1577356606721878,
"learning_rate": 4.1595319634703204e-05,
"loss": 0.0047,
"step": 590
},
{
"epoch": 1.36986301369863,
"grad_norm": 0.0654304102063179,
"learning_rate": 4.145262557077626e-05,
"loss": 0.0043,
"step": 600
},
{
"epoch": 1.3926940639269407,
"grad_norm": 0.13026106357574463,
"learning_rate": 4.130993150684932e-05,
"loss": 0.0046,
"step": 610
},
{
"epoch": 1.4155251141552512,
"grad_norm": 0.1981119066476822,
"learning_rate": 4.1167237442922374e-05,
"loss": 0.0036,
"step": 620
},
{
"epoch": 1.4383561643835616,
"grad_norm": 0.0626244992017746,
"learning_rate": 4.102454337899544e-05,
"loss": 0.0029,
"step": 630
},
{
"epoch": 1.461187214611872,
"grad_norm": 0.09634381532669067,
"learning_rate": 4.0881849315068494e-05,
"loss": 0.0035,
"step": 640
},
{
"epoch": 1.4840182648401825,
"grad_norm": 0.10267172008752823,
"learning_rate": 4.073915525114155e-05,
"loss": 0.0029,
"step": 650
},
{
"epoch": 1.5068493150684932,
"grad_norm": 0.1381852775812149,
"learning_rate": 4.0596461187214614e-05,
"loss": 0.0034,
"step": 660
},
{
"epoch": 1.5296803652968036,
"grad_norm": 0.05667397379875183,
"learning_rate": 4.045376712328767e-05,
"loss": 0.0034,
"step": 670
},
{
"epoch": 1.5525114155251143,
"grad_norm": 0.09371186792850494,
"learning_rate": 4.0311073059360734e-05,
"loss": 0.0035,
"step": 680
},
{
"epoch": 1.5753424657534247,
"grad_norm": 0.09487811475992203,
"learning_rate": 4.016837899543379e-05,
"loss": 0.0032,
"step": 690
},
{
"epoch": 1.5981735159817352,
"grad_norm": 0.09329680353403091,
"learning_rate": 4.0025684931506853e-05,
"loss": 0.0034,
"step": 700
},
{
"epoch": 1.6210045662100456,
"grad_norm": 0.10534738004207611,
"learning_rate": 3.988299086757991e-05,
"loss": 0.003,
"step": 710
},
{
"epoch": 1.643835616438356,
"grad_norm": 0.1082799881696701,
"learning_rate": 3.9740296803652973e-05,
"loss": 0.0032,
"step": 720
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.06728220731019974,
"learning_rate": 3.959760273972603e-05,
"loss": 0.0031,
"step": 730
},
{
"epoch": 1.6894977168949772,
"grad_norm": 0.22814679145812988,
"learning_rate": 3.9454908675799093e-05,
"loss": 0.003,
"step": 740
},
{
"epoch": 1.7123287671232876,
"grad_norm": 0.10962113738059998,
"learning_rate": 3.931221461187215e-05,
"loss": 0.003,
"step": 750
},
{
"epoch": 1.7351598173515983,
"grad_norm": 0.054916515946388245,
"learning_rate": 3.916952054794521e-05,
"loss": 0.0024,
"step": 760
},
{
"epoch": 1.7579908675799087,
"grad_norm": 0.08513092249631882,
"learning_rate": 3.902682648401826e-05,
"loss": 0.0027,
"step": 770
},
{
"epoch": 1.7808219178082192,
"grad_norm": 0.03006896749138832,
"learning_rate": 3.888413242009133e-05,
"loss": 0.0026,
"step": 780
},
{
"epoch": 1.8036529680365296,
"grad_norm": 0.0818442776799202,
"learning_rate": 3.874143835616438e-05,
"loss": 0.0025,
"step": 790
},
{
"epoch": 1.82648401826484,
"grad_norm": 0.06740756332874298,
"learning_rate": 3.859874429223744e-05,
"loss": 0.0019,
"step": 800
},
{
"epoch": 1.8493150684931505,
"grad_norm": 0.022570671513676643,
"learning_rate": 3.84560502283105e-05,
"loss": 0.0026,
"step": 810
},
{
"epoch": 1.8721461187214612,
"grad_norm": 0.06723079085350037,
"learning_rate": 3.831335616438356e-05,
"loss": 0.0021,
"step": 820
},
{
"epoch": 1.8949771689497716,
"grad_norm": 0.12359272688627243,
"learning_rate": 3.817066210045662e-05,
"loss": 0.0023,
"step": 830
},
{
"epoch": 1.9178082191780823,
"grad_norm": 0.07307197153568268,
"learning_rate": 3.802796803652968e-05,
"loss": 0.0024,
"step": 840
},
{
"epoch": 1.9406392694063928,
"grad_norm": 0.11846048384904861,
"learning_rate": 3.788527397260274e-05,
"loss": 0.0026,
"step": 850
},
{
"epoch": 1.9634703196347032,
"grad_norm": 0.043823979794979095,
"learning_rate": 3.77425799086758e-05,
"loss": 0.0026,
"step": 860
},
{
"epoch": 1.9863013698630136,
"grad_norm": 0.06740803271532059,
"learning_rate": 3.759988584474886e-05,
"loss": 0.0026,
"step": 870
},
{
"epoch": 2.0,
"eval_loss": 0.0005470711039379239,
"eval_runtime": 70.9617,
"eval_samples_per_second": 42.276,
"eval_steps_per_second": 2.649,
"step": 876
},
{
"epoch": 2.009132420091324,
"grad_norm": 0.07825632393360138,
"learning_rate": 3.745719178082192e-05,
"loss": 0.0023,
"step": 880
},
{
"epoch": 2.0319634703196345,
"grad_norm": 0.07475470006465912,
"learning_rate": 3.731449771689498e-05,
"loss": 0.0026,
"step": 890
},
{
"epoch": 2.0547945205479454,
"grad_norm": 0.027003251016139984,
"learning_rate": 3.717180365296804e-05,
"loss": 0.002,
"step": 900
},
{
"epoch": 2.077625570776256,
"grad_norm": 0.02263038419187069,
"learning_rate": 3.70291095890411e-05,
"loss": 0.0024,
"step": 910
},
{
"epoch": 2.1004566210045663,
"grad_norm": 0.10978245735168457,
"learning_rate": 3.688641552511416e-05,
"loss": 0.0016,
"step": 920
},
{
"epoch": 2.1232876712328768,
"grad_norm": 0.10029231756925583,
"learning_rate": 3.6743721461187216e-05,
"loss": 0.0019,
"step": 930
},
{
"epoch": 2.146118721461187,
"grad_norm": 0.13055719435214996,
"learning_rate": 3.660102739726027e-05,
"loss": 0.0024,
"step": 940
},
{
"epoch": 2.1689497716894977,
"grad_norm": 0.02834697626531124,
"learning_rate": 3.6458333333333336e-05,
"loss": 0.0021,
"step": 950
},
{
"epoch": 2.191780821917808,
"grad_norm": 0.03155607730150223,
"learning_rate": 3.631563926940639e-05,
"loss": 0.0017,
"step": 960
},
{
"epoch": 2.2146118721461185,
"grad_norm": 0.012841666117310524,
"learning_rate": 3.617294520547945e-05,
"loss": 0.0014,
"step": 970
},
{
"epoch": 2.237442922374429,
"grad_norm": 0.06208495423197746,
"learning_rate": 3.603025114155251e-05,
"loss": 0.0018,
"step": 980
},
{
"epoch": 2.26027397260274,
"grad_norm": 0.03939255326986313,
"learning_rate": 3.588755707762557e-05,
"loss": 0.0015,
"step": 990
},
{
"epoch": 2.2831050228310503,
"grad_norm": 0.0387921966612339,
"learning_rate": 3.574486301369863e-05,
"loss": 0.0016,
"step": 1000
},
{
"epoch": 2.3059360730593608,
"grad_norm": 0.037367332726716995,
"learning_rate": 3.560216894977169e-05,
"loss": 0.0017,
"step": 1010
},
{
"epoch": 2.328767123287671,
"grad_norm": 0.09546244889497757,
"learning_rate": 3.545947488584475e-05,
"loss": 0.0016,
"step": 1020
},
{
"epoch": 2.3515981735159817,
"grad_norm": 0.05310087651014328,
"learning_rate": 3.531678082191781e-05,
"loss": 0.0017,
"step": 1030
},
{
"epoch": 2.374429223744292,
"grad_norm": 0.13905133306980133,
"learning_rate": 3.517408675799087e-05,
"loss": 0.0016,
"step": 1040
},
{
"epoch": 2.3972602739726026,
"grad_norm": 0.13682198524475098,
"learning_rate": 3.503139269406393e-05,
"loss": 0.0015,
"step": 1050
},
{
"epoch": 2.4200913242009134,
"grad_norm": 0.07560446858406067,
"learning_rate": 3.488869863013699e-05,
"loss": 0.0019,
"step": 1060
},
{
"epoch": 2.442922374429224,
"grad_norm": 0.024818843230605125,
"learning_rate": 3.474600456621005e-05,
"loss": 0.0019,
"step": 1070
},
{
"epoch": 2.4657534246575343,
"grad_norm": 0.2794188857078552,
"learning_rate": 3.4603310502283106e-05,
"loss": 0.0014,
"step": 1080
},
{
"epoch": 2.4885844748858448,
"grad_norm": 0.03945288807153702,
"learning_rate": 3.446061643835616e-05,
"loss": 0.0013,
"step": 1090
},
{
"epoch": 2.5114155251141552,
"grad_norm": 0.028486991301178932,
"learning_rate": 3.4317922374429226e-05,
"loss": 0.0015,
"step": 1100
},
{
"epoch": 2.5342465753424657,
"grad_norm": 0.19093026220798492,
"learning_rate": 3.417522831050228e-05,
"loss": 0.0016,
"step": 1110
},
{
"epoch": 2.557077625570776,
"grad_norm": 0.01798168569803238,
"learning_rate": 3.4032534246575346e-05,
"loss": 0.0012,
"step": 1120
},
{
"epoch": 2.5799086757990866,
"grad_norm": 0.03545854985713959,
"learning_rate": 3.38898401826484e-05,
"loss": 0.0012,
"step": 1130
},
{
"epoch": 2.602739726027397,
"grad_norm": 0.0346076525747776,
"learning_rate": 3.374714611872146e-05,
"loss": 0.0013,
"step": 1140
},
{
"epoch": 2.625570776255708,
"grad_norm": 0.031317390501499176,
"learning_rate": 3.360445205479452e-05,
"loss": 0.0011,
"step": 1150
},
{
"epoch": 2.6484018264840183,
"grad_norm": 0.018010897561907768,
"learning_rate": 3.346175799086758e-05,
"loss": 0.0013,
"step": 1160
},
{
"epoch": 2.671232876712329,
"grad_norm": 0.04990324005484581,
"learning_rate": 3.331906392694064e-05,
"loss": 0.0017,
"step": 1170
},
{
"epoch": 2.6940639269406392,
"grad_norm": 0.10162694752216339,
"learning_rate": 3.31763698630137e-05,
"loss": 0.0015,
"step": 1180
},
{
"epoch": 2.7168949771689497,
"grad_norm": 0.05713967978954315,
"learning_rate": 3.303367579908676e-05,
"loss": 0.0014,
"step": 1190
},
{
"epoch": 2.73972602739726,
"grad_norm": 0.12799331545829773,
"learning_rate": 3.289098173515982e-05,
"loss": 0.0013,
"step": 1200
},
{
"epoch": 2.762557077625571,
"grad_norm": 0.022338515147566795,
"learning_rate": 3.274828767123288e-05,
"loss": 0.0013,
"step": 1210
},
{
"epoch": 2.7853881278538815,
"grad_norm": 0.01897992566227913,
"learning_rate": 3.260559360730594e-05,
"loss": 0.0011,
"step": 1220
},
{
"epoch": 2.808219178082192,
"grad_norm": 0.04136025533080101,
"learning_rate": 3.2462899543379e-05,
"loss": 0.0017,
"step": 1230
},
{
"epoch": 2.8310502283105023,
"grad_norm": 0.09205514937639236,
"learning_rate": 3.232020547945205e-05,
"loss": 0.0012,
"step": 1240
},
{
"epoch": 2.853881278538813,
"grad_norm": 0.05605219677090645,
"learning_rate": 3.2177511415525116e-05,
"loss": 0.0013,
"step": 1250
},
{
"epoch": 2.8767123287671232,
"grad_norm": 0.20732928812503815,
"learning_rate": 3.203481735159817e-05,
"loss": 0.0016,
"step": 1260
},
{
"epoch": 2.8995433789954337,
"grad_norm": 0.01767803728580475,
"learning_rate": 3.1892123287671236e-05,
"loss": 0.0011,
"step": 1270
},
{
"epoch": 2.922374429223744,
"grad_norm": 0.05888301134109497,
"learning_rate": 3.174942922374429e-05,
"loss": 0.0013,
"step": 1280
},
{
"epoch": 2.9452054794520546,
"grad_norm": 0.060406286269426346,
"learning_rate": 3.160673515981735e-05,
"loss": 0.0012,
"step": 1290
},
{
"epoch": 2.968036529680365,
"grad_norm": 0.016563862562179565,
"learning_rate": 3.146404109589041e-05,
"loss": 0.0011,
"step": 1300
},
{
"epoch": 2.990867579908676,
"grad_norm": 0.032936833798885345,
"learning_rate": 3.132134703196347e-05,
"loss": 0.0012,
"step": 1310
},
{
"epoch": 3.0,
"eval_loss": 2.3043350665830076e-05,
"eval_runtime": 71.3592,
"eval_samples_per_second": 42.041,
"eval_steps_per_second": 2.635,
"step": 1314
},
{
"epoch": 3.0136986301369864,
"grad_norm": 0.043910350650548935,
"learning_rate": 3.117865296803653e-05,
"loss": 0.0012,
"step": 1320
},
{
"epoch": 3.036529680365297,
"grad_norm": 0.051233988255262375,
"learning_rate": 3.103595890410959e-05,
"loss": 0.0012,
"step": 1330
},
{
"epoch": 3.0593607305936072,
"grad_norm": 0.0460955835878849,
"learning_rate": 3.089326484018265e-05,
"loss": 0.0012,
"step": 1340
},
{
"epoch": 3.0821917808219177,
"grad_norm": 0.11139486730098724,
"learning_rate": 3.075057077625571e-05,
"loss": 0.0015,
"step": 1350
},
{
"epoch": 3.105022831050228,
"grad_norm": 0.020884834229946136,
"learning_rate": 3.060787671232877e-05,
"loss": 0.0009,
"step": 1360
},
{
"epoch": 3.127853881278539,
"grad_norm": 0.04865699261426926,
"learning_rate": 3.046518264840183e-05,
"loss": 0.0009,
"step": 1370
},
{
"epoch": 3.1506849315068495,
"grad_norm": 0.04672854766249657,
"learning_rate": 3.032248858447489e-05,
"loss": 0.0009,
"step": 1380
},
{
"epoch": 3.17351598173516,
"grad_norm": 0.04292335361242294,
"learning_rate": 3.017979452054795e-05,
"loss": 0.0009,
"step": 1390
},
{
"epoch": 3.1963470319634704,
"grad_norm": 0.06898372620344162,
"learning_rate": 3.0037100456621002e-05,
"loss": 0.0009,
"step": 1400
},
{
"epoch": 3.219178082191781,
"grad_norm": 0.08381140232086182,
"learning_rate": 2.9894406392694062e-05,
"loss": 0.0012,
"step": 1410
},
{
"epoch": 3.2420091324200913,
"grad_norm": 0.009932724758982658,
"learning_rate": 2.9751712328767122e-05,
"loss": 0.0009,
"step": 1420
},
{
"epoch": 3.2648401826484017,
"grad_norm": 0.055948104709386826,
"learning_rate": 2.9609018264840182e-05,
"loss": 0.0012,
"step": 1430
},
{
"epoch": 3.287671232876712,
"grad_norm": 0.028575940057635307,
"learning_rate": 2.9466324200913242e-05,
"loss": 0.0011,
"step": 1440
},
{
"epoch": 3.3105022831050226,
"grad_norm": 0.023319421336054802,
"learning_rate": 2.9323630136986302e-05,
"loss": 0.0009,
"step": 1450
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.05601884797215462,
"learning_rate": 2.9180936073059362e-05,
"loss": 0.0009,
"step": 1460
},
{
"epoch": 3.356164383561644,
"grad_norm": 0.017812812700867653,
"learning_rate": 2.9038242009132422e-05,
"loss": 0.0008,
"step": 1470
},
{
"epoch": 3.3789954337899544,
"grad_norm": 0.015839802101254463,
"learning_rate": 2.8895547945205482e-05,
"loss": 0.0009,
"step": 1480
},
{
"epoch": 3.401826484018265,
"grad_norm": 0.013905179686844349,
"learning_rate": 2.8752853881278542e-05,
"loss": 0.0009,
"step": 1490
},
{
"epoch": 3.4246575342465753,
"grad_norm": 0.034228548407554626,
"learning_rate": 2.8610159817351602e-05,
"loss": 0.0009,
"step": 1500
},
{
"epoch": 3.4474885844748857,
"grad_norm": 0.1382512003183365,
"learning_rate": 2.846746575342466e-05,
"loss": 0.001,
"step": 1510
},
{
"epoch": 3.470319634703196,
"grad_norm": 0.026061363518238068,
"learning_rate": 2.832477168949772e-05,
"loss": 0.0007,
"step": 1520
},
{
"epoch": 3.493150684931507,
"grad_norm": 0.033779580146074295,
"learning_rate": 2.818207762557078e-05,
"loss": 0.0007,
"step": 1530
},
{
"epoch": 3.5159817351598175,
"grad_norm": 0.01672547496855259,
"learning_rate": 2.803938356164384e-05,
"loss": 0.0009,
"step": 1540
},
{
"epoch": 3.538812785388128,
"grad_norm": 0.03841786831617355,
"learning_rate": 2.7896689497716895e-05,
"loss": 0.0008,
"step": 1550
},
{
"epoch": 3.5616438356164384,
"grad_norm": 0.042960572987794876,
"learning_rate": 2.7753995433789952e-05,
"loss": 0.0008,
"step": 1560
},
{
"epoch": 3.584474885844749,
"grad_norm": 0.016713028773665428,
"learning_rate": 2.7611301369863012e-05,
"loss": 0.0007,
"step": 1570
},
{
"epoch": 3.6073059360730593,
"grad_norm": 0.04062510281801224,
"learning_rate": 2.7468607305936072e-05,
"loss": 0.0007,
"step": 1580
},
{
"epoch": 3.6301369863013697,
"grad_norm": 0.013800432905554771,
"learning_rate": 2.7325913242009132e-05,
"loss": 0.0009,
"step": 1590
},
{
"epoch": 3.65296803652968,
"grad_norm": 0.03654768317937851,
"learning_rate": 2.7183219178082192e-05,
"loss": 0.0008,
"step": 1600
},
{
"epoch": 3.6757990867579906,
"grad_norm": 0.04716182500123978,
"learning_rate": 2.7040525114155252e-05,
"loss": 0.0007,
"step": 1610
},
{
"epoch": 3.6986301369863015,
"grad_norm": 0.006938883103430271,
"learning_rate": 2.6897831050228312e-05,
"loss": 0.0005,
"step": 1620
},
{
"epoch": 3.721461187214612,
"grad_norm": 0.07364363968372345,
"learning_rate": 2.6755136986301372e-05,
"loss": 0.0007,
"step": 1630
},
{
"epoch": 3.7442922374429224,
"grad_norm": 0.028122154995799065,
"learning_rate": 2.6612442922374432e-05,
"loss": 0.0006,
"step": 1640
},
{
"epoch": 3.767123287671233,
"grad_norm": 0.008902553468942642,
"learning_rate": 2.6469748858447492e-05,
"loss": 0.0006,
"step": 1650
},
{
"epoch": 3.7899543378995433,
"grad_norm": 0.013538227416574955,
"learning_rate": 2.6327054794520552e-05,
"loss": 0.0007,
"step": 1660
},
{
"epoch": 3.8127853881278537,
"grad_norm": 0.01429641991853714,
"learning_rate": 2.6184360730593612e-05,
"loss": 0.0007,
"step": 1670
},
{
"epoch": 3.8356164383561646,
"grad_norm": 0.03232923895120621,
"learning_rate": 2.604166666666667e-05,
"loss": 0.0006,
"step": 1680
},
{
"epoch": 3.858447488584475,
"grad_norm": 0.008046794682741165,
"learning_rate": 2.589897260273973e-05,
"loss": 0.0008,
"step": 1690
},
{
"epoch": 3.8812785388127855,
"grad_norm": 0.04098529741168022,
"learning_rate": 2.575627853881279e-05,
"loss": 0.0006,
"step": 1700
},
{
"epoch": 3.904109589041096,
"grad_norm": 0.06163308396935463,
"learning_rate": 2.5613584474885845e-05,
"loss": 0.0007,
"step": 1710
},
{
"epoch": 3.9269406392694064,
"grad_norm": 0.022219218313694,
"learning_rate": 2.54708904109589e-05,
"loss": 0.0006,
"step": 1720
},
{
"epoch": 3.949771689497717,
"grad_norm": 0.09443829208612442,
"learning_rate": 2.532819634703196e-05,
"loss": 0.0007,
"step": 1730
},
{
"epoch": 3.9726027397260273,
"grad_norm": 0.03068207949399948,
"learning_rate": 2.518550228310502e-05,
"loss": 0.0007,
"step": 1740
},
{
"epoch": 3.9954337899543377,
"grad_norm": 0.09690997749567032,
"learning_rate": 2.504280821917808e-05,
"loss": 0.0007,
"step": 1750
},
{
"epoch": 4.0,
"eval_loss": 9.936958122125361e-06,
"eval_runtime": 71.0483,
"eval_samples_per_second": 42.225,
"eval_steps_per_second": 2.646,
"step": 1752
},
{
"epoch": 4.018264840182648,
"grad_norm": 0.011336731724441051,
"learning_rate": 2.490011415525114e-05,
"loss": 0.0006,
"step": 1760
},
{
"epoch": 4.041095890410959,
"grad_norm": 0.015287565998733044,
"learning_rate": 2.47574200913242e-05,
"loss": 0.0008,
"step": 1770
},
{
"epoch": 4.063926940639269,
"grad_norm": 0.008016168139874935,
"learning_rate": 2.461472602739726e-05,
"loss": 0.0007,
"step": 1780
},
{
"epoch": 4.0867579908675795,
"grad_norm": 0.06609506160020828,
"learning_rate": 2.447203196347032e-05,
"loss": 0.0006,
"step": 1790
},
{
"epoch": 4.109589041095891,
"grad_norm": 0.009175929240882397,
"learning_rate": 2.432933789954338e-05,
"loss": 0.0007,
"step": 1800
},
{
"epoch": 4.132420091324201,
"grad_norm": 0.0126581359654665,
"learning_rate": 2.418664383561644e-05,
"loss": 0.0005,
"step": 1810
},
{
"epoch": 4.155251141552512,
"grad_norm": 0.08836681395769119,
"learning_rate": 2.40439497716895e-05,
"loss": 0.001,
"step": 1820
},
{
"epoch": 4.178082191780822,
"grad_norm": 0.025666864588856697,
"learning_rate": 2.3901255707762558e-05,
"loss": 0.0006,
"step": 1830
},
{
"epoch": 4.200913242009133,
"grad_norm": 0.03877547010779381,
"learning_rate": 2.3758561643835618e-05,
"loss": 0.0005,
"step": 1840
},
{
"epoch": 4.223744292237443,
"grad_norm": 0.016599468886852264,
"learning_rate": 2.3615867579908678e-05,
"loss": 0.0006,
"step": 1850
},
{
"epoch": 4.2465753424657535,
"grad_norm": 0.06378292292356491,
"learning_rate": 2.3473173515981735e-05,
"loss": 0.0012,
"step": 1860
},
{
"epoch": 4.269406392694064,
"grad_norm": 0.014281521551311016,
"learning_rate": 2.3330479452054795e-05,
"loss": 0.0008,
"step": 1870
},
{
"epoch": 4.292237442922374,
"grad_norm": 0.09428286552429199,
"learning_rate": 2.3187785388127855e-05,
"loss": 0.0009,
"step": 1880
},
{
"epoch": 4.315068493150685,
"grad_norm": 0.006857575848698616,
"learning_rate": 2.3045091324200915e-05,
"loss": 0.0006,
"step": 1890
},
{
"epoch": 4.337899543378995,
"grad_norm": 0.08501231670379639,
"learning_rate": 2.2902397260273975e-05,
"loss": 0.0006,
"step": 1900
},
{
"epoch": 4.360730593607306,
"grad_norm": 0.01724555715918541,
"learning_rate": 2.275970319634703e-05,
"loss": 0.0006,
"step": 1910
},
{
"epoch": 4.383561643835616,
"grad_norm": 0.008119191974401474,
"learning_rate": 2.261700913242009e-05,
"loss": 0.0006,
"step": 1920
},
{
"epoch": 4.406392694063927,
"grad_norm": 0.026137014850974083,
"learning_rate": 2.247431506849315e-05,
"loss": 0.0005,
"step": 1930
},
{
"epoch": 4.429223744292237,
"grad_norm": 0.005971621256321669,
"learning_rate": 2.233162100456621e-05,
"loss": 0.0006,
"step": 1940
},
{
"epoch": 4.4520547945205475,
"grad_norm": 0.04149805009365082,
"learning_rate": 2.218892694063927e-05,
"loss": 0.0006,
"step": 1950
},
{
"epoch": 4.474885844748858,
"grad_norm": 0.00725915003567934,
"learning_rate": 2.204623287671233e-05,
"loss": 0.0005,
"step": 1960
},
{
"epoch": 4.497716894977169,
"grad_norm": 0.04520484060049057,
"learning_rate": 2.190353881278539e-05,
"loss": 0.0006,
"step": 1970
},
{
"epoch": 4.52054794520548,
"grad_norm": 0.029336770996451378,
"learning_rate": 2.1760844748858448e-05,
"loss": 0.0005,
"step": 1980
},
{
"epoch": 4.54337899543379,
"grad_norm": 0.09512148797512054,
"learning_rate": 2.1618150684931508e-05,
"loss": 0.0008,
"step": 1990
},
{
"epoch": 4.566210045662101,
"grad_norm": 0.042485009878873825,
"learning_rate": 2.1475456621004568e-05,
"loss": 0.0005,
"step": 2000
},
{
"epoch": 4.589041095890411,
"grad_norm": 0.013867770321667194,
"learning_rate": 2.1332762557077628e-05,
"loss": 0.0006,
"step": 2010
},
{
"epoch": 4.6118721461187215,
"grad_norm": 0.05775037035346031,
"learning_rate": 2.1190068493150684e-05,
"loss": 0.0006,
"step": 2020
},
{
"epoch": 4.634703196347032,
"grad_norm": 0.010395473800599575,
"learning_rate": 2.1047374429223744e-05,
"loss": 0.0006,
"step": 2030
},
{
"epoch": 4.657534246575342,
"grad_norm": 0.008579747751355171,
"learning_rate": 2.0904680365296804e-05,
"loss": 0.0005,
"step": 2040
},
{
"epoch": 4.680365296803653,
"grad_norm": 0.011320821940898895,
"learning_rate": 2.0761986301369864e-05,
"loss": 0.0004,
"step": 2050
},
{
"epoch": 4.703196347031963,
"grad_norm": 0.038680724799633026,
"learning_rate": 2.061929223744292e-05,
"loss": 0.0005,
"step": 2060
},
{
"epoch": 4.726027397260274,
"grad_norm": 0.014130471274256706,
"learning_rate": 2.047659817351598e-05,
"loss": 0.0005,
"step": 2070
},
{
"epoch": 4.748858447488584,
"grad_norm": 0.01286408118903637,
"learning_rate": 2.033390410958904e-05,
"loss": 0.0006,
"step": 2080
},
{
"epoch": 4.771689497716895,
"grad_norm": 0.06255431473255157,
"learning_rate": 2.01912100456621e-05,
"loss": 0.0006,
"step": 2090
},
{
"epoch": 4.794520547945205,
"grad_norm": 0.033248186111450195,
"learning_rate": 2.004851598173516e-05,
"loss": 0.0007,
"step": 2100
},
{
"epoch": 4.817351598173516,
"grad_norm": 0.008136480115354061,
"learning_rate": 1.990582191780822e-05,
"loss": 0.0007,
"step": 2110
},
{
"epoch": 4.840182648401827,
"grad_norm": 0.0758700743317604,
"learning_rate": 1.976312785388128e-05,
"loss": 0.0006,
"step": 2120
},
{
"epoch": 4.863013698630137,
"grad_norm": 0.008333769626915455,
"learning_rate": 1.962043378995434e-05,
"loss": 0.0005,
"step": 2130
},
{
"epoch": 4.885844748858448,
"grad_norm": 0.010320034809410572,
"learning_rate": 1.9477739726027397e-05,
"loss": 0.0004,
"step": 2140
},
{
"epoch": 4.908675799086758,
"grad_norm": 0.00360821932554245,
"learning_rate": 1.9335045662100457e-05,
"loss": 0.0006,
"step": 2150
},
{
"epoch": 4.931506849315069,
"grad_norm": 0.02473396062850952,
"learning_rate": 1.9192351598173517e-05,
"loss": 0.0005,
"step": 2160
},
{
"epoch": 4.954337899543379,
"grad_norm": 0.022373970597982407,
"learning_rate": 1.9049657534246577e-05,
"loss": 0.0006,
"step": 2170
},
{
"epoch": 4.9771689497716896,
"grad_norm": 0.04363209009170532,
"learning_rate": 1.8906963470319637e-05,
"loss": 0.0005,
"step": 2180
},
{
"epoch": 5.0,
"grad_norm": 0.0052858320996165276,
"learning_rate": 1.8764269406392694e-05,
"loss": 0.0005,
"step": 2190
},
{
"epoch": 5.0,
"eval_loss": 5.627029167953879e-06,
"eval_runtime": 71.2259,
"eval_samples_per_second": 42.12,
"eval_steps_per_second": 2.639,
"step": 2190
},
{
"epoch": 5.0228310502283104,
"grad_norm": 0.00950402021408081,
"learning_rate": 1.8621575342465754e-05,
"loss": 0.0005,
"step": 2200
},
{
"epoch": 5.045662100456621,
"grad_norm": 0.010009740479290485,
"learning_rate": 1.8478881278538814e-05,
"loss": 0.0005,
"step": 2210
},
{
"epoch": 5.068493150684931,
"grad_norm": 0.032611336559057236,
"learning_rate": 1.833618721461187e-05,
"loss": 0.0004,
"step": 2220
},
{
"epoch": 5.091324200913242,
"grad_norm": 0.00459344731643796,
"learning_rate": 1.819349315068493e-05,
"loss": 0.0005,
"step": 2230
},
{
"epoch": 5.114155251141552,
"grad_norm": 0.03434896096587181,
"learning_rate": 1.805079908675799e-05,
"loss": 0.0008,
"step": 2240
},
{
"epoch": 5.136986301369863,
"grad_norm": 0.030723148956894875,
"learning_rate": 1.790810502283105e-05,
"loss": 0.0004,
"step": 2250
},
{
"epoch": 5.159817351598173,
"grad_norm": 0.0043680015951395035,
"learning_rate": 1.776541095890411e-05,
"loss": 0.0003,
"step": 2260
},
{
"epoch": 5.182648401826484,
"grad_norm": 0.011316012591123581,
"learning_rate": 1.762271689497717e-05,
"loss": 0.0006,
"step": 2270
},
{
"epoch": 5.205479452054795,
"grad_norm": 0.006297079846262932,
"learning_rate": 1.748002283105023e-05,
"loss": 0.0005,
"step": 2280
},
{
"epoch": 5.228310502283105,
"grad_norm": 0.018946554511785507,
"learning_rate": 1.733732876712329e-05,
"loss": 0.0004,
"step": 2290
},
{
"epoch": 5.251141552511416,
"grad_norm": 0.017262902110815048,
"learning_rate": 1.7194634703196347e-05,
"loss": 0.0004,
"step": 2300
},
{
"epoch": 5.273972602739726,
"grad_norm": 0.010157715529203415,
"learning_rate": 1.7051940639269407e-05,
"loss": 0.0004,
"step": 2310
},
{
"epoch": 5.296803652968037,
"grad_norm": 0.012058730237185955,
"learning_rate": 1.6909246575342467e-05,
"loss": 0.0008,
"step": 2320
},
{
"epoch": 5.319634703196347,
"grad_norm": 0.006543818395584822,
"learning_rate": 1.6766552511415527e-05,
"loss": 0.0003,
"step": 2330
},
{
"epoch": 5.342465753424658,
"grad_norm": 0.010772732086479664,
"learning_rate": 1.6623858447488587e-05,
"loss": 0.0004,
"step": 2340
},
{
"epoch": 5.365296803652968,
"grad_norm": 0.0169979240745306,
"learning_rate": 1.6481164383561644e-05,
"loss": 0.0003,
"step": 2350
},
{
"epoch": 5.3881278538812785,
"grad_norm": 0.009800782427191734,
"learning_rate": 1.6338470319634704e-05,
"loss": 0.0003,
"step": 2360
},
{
"epoch": 5.410958904109589,
"grad_norm": 0.04885419085621834,
"learning_rate": 1.6195776255707764e-05,
"loss": 0.0004,
"step": 2370
},
{
"epoch": 5.433789954337899,
"grad_norm": 0.024770863354206085,
"learning_rate": 1.605308219178082e-05,
"loss": 0.0005,
"step": 2380
},
{
"epoch": 5.45662100456621,
"grad_norm": 0.0030876509845256805,
"learning_rate": 1.591038812785388e-05,
"loss": 0.0004,
"step": 2390
},
{
"epoch": 5.47945205479452,
"grad_norm": 0.018670551478862762,
"learning_rate": 1.576769406392694e-05,
"loss": 0.0004,
"step": 2400
},
{
"epoch": 5.502283105022831,
"grad_norm": 0.011356856673955917,
"learning_rate": 1.5625e-05,
"loss": 0.0006,
"step": 2410
},
{
"epoch": 5.525114155251142,
"grad_norm": 0.0847870260477066,
"learning_rate": 1.548230593607306e-05,
"loss": 0.0004,
"step": 2420
},
{
"epoch": 5.5479452054794525,
"grad_norm": 0.017403950914740562,
"learning_rate": 1.533961187214612e-05,
"loss": 0.0004,
"step": 2430
},
{
"epoch": 5.570776255707763,
"grad_norm": 0.17497682571411133,
"learning_rate": 1.519691780821918e-05,
"loss": 0.0006,
"step": 2440
},
{
"epoch": 5.593607305936073,
"grad_norm": 0.032679907977581024,
"learning_rate": 1.5054223744292237e-05,
"loss": 0.0003,
"step": 2450
},
{
"epoch": 5.616438356164384,
"grad_norm": 0.011194907128810883,
"learning_rate": 1.4911529680365297e-05,
"loss": 0.0004,
"step": 2460
},
{
"epoch": 5.639269406392694,
"grad_norm": 0.017079461365938187,
"learning_rate": 1.4768835616438357e-05,
"loss": 0.0004,
"step": 2470
},
{
"epoch": 5.662100456621005,
"grad_norm": 0.09856831282377243,
"learning_rate": 1.4626141552511415e-05,
"loss": 0.0004,
"step": 2480
},
{
"epoch": 5.684931506849315,
"grad_norm": 0.1313595324754715,
"learning_rate": 1.4483447488584475e-05,
"loss": 0.0007,
"step": 2490
},
{
"epoch": 5.707762557077626,
"grad_norm": 0.006652920041233301,
"learning_rate": 1.4340753424657535e-05,
"loss": 0.0004,
"step": 2500
},
{
"epoch": 5.730593607305936,
"grad_norm": 0.009232467040419579,
"learning_rate": 1.4198059360730595e-05,
"loss": 0.0004,
"step": 2510
},
{
"epoch": 5.7534246575342465,
"grad_norm": 0.013255695812404156,
"learning_rate": 1.4055365296803655e-05,
"loss": 0.0005,
"step": 2520
},
{
"epoch": 5.776255707762557,
"grad_norm": 0.017853327095508575,
"learning_rate": 1.3912671232876712e-05,
"loss": 0.0005,
"step": 2530
},
{
"epoch": 5.799086757990867,
"grad_norm": 0.011152304708957672,
"learning_rate": 1.3769977168949772e-05,
"loss": 0.0004,
"step": 2540
},
{
"epoch": 5.821917808219178,
"grad_norm": 0.00643182685598731,
"learning_rate": 1.3627283105022832e-05,
"loss": 0.0004,
"step": 2550
},
{
"epoch": 5.844748858447488,
"grad_norm": 0.016502410173416138,
"learning_rate": 1.348458904109589e-05,
"loss": 0.0005,
"step": 2560
},
{
"epoch": 5.867579908675799,
"grad_norm": 0.01771596260368824,
"learning_rate": 1.334189497716895e-05,
"loss": 0.0005,
"step": 2570
},
{
"epoch": 5.890410958904109,
"grad_norm": 0.024237055331468582,
"learning_rate": 1.319920091324201e-05,
"loss": 0.0004,
"step": 2580
},
{
"epoch": 5.91324200913242,
"grad_norm": 0.03518439084291458,
"learning_rate": 1.305650684931507e-05,
"loss": 0.0003,
"step": 2590
},
{
"epoch": 5.936073059360731,
"grad_norm": 0.014818640425801277,
"learning_rate": 1.291381278538813e-05,
"loss": 0.0004,
"step": 2600
},
{
"epoch": 5.958904109589041,
"grad_norm": 0.006442646961659193,
"learning_rate": 1.2771118721461187e-05,
"loss": 0.0005,
"step": 2610
},
{
"epoch": 5.981735159817352,
"grad_norm": 0.0037180872168391943,
"learning_rate": 1.2628424657534247e-05,
"loss": 0.0004,
"step": 2620
},
{
"epoch": 6.0,
"eval_loss": 3.286777655375772e-06,
"eval_runtime": 71.001,
"eval_samples_per_second": 42.253,
"eval_steps_per_second": 2.648,
"step": 2628
},
{
"epoch": 6.004566210045662,
"grad_norm": 0.00996373500674963,
"learning_rate": 1.2485730593607307e-05,
"loss": 0.0003,
"step": 2630
},
{
"epoch": 6.027397260273973,
"grad_norm": 0.0067086792550981045,
"learning_rate": 1.2343036529680365e-05,
"loss": 0.0005,
"step": 2640
},
{
"epoch": 6.050228310502283,
"grad_norm": 0.009166865609586239,
"learning_rate": 1.2200342465753425e-05,
"loss": 0.0005,
"step": 2650
},
{
"epoch": 6.073059360730594,
"grad_norm": 0.005400144029408693,
"learning_rate": 1.2057648401826485e-05,
"loss": 0.0003,
"step": 2660
},
{
"epoch": 6.095890410958904,
"grad_norm": 0.0033110773656517267,
"learning_rate": 1.1914954337899543e-05,
"loss": 0.0003,
"step": 2670
},
{
"epoch": 6.1187214611872145,
"grad_norm": 0.017671756446361542,
"learning_rate": 1.1772260273972603e-05,
"loss": 0.0003,
"step": 2680
},
{
"epoch": 6.141552511415525,
"grad_norm": 0.016568470746278763,
"learning_rate": 1.1629566210045663e-05,
"loss": 0.0005,
"step": 2690
},
{
"epoch": 6.164383561643835,
"grad_norm": 0.11980602890253067,
"learning_rate": 1.1486872146118723e-05,
"loss": 0.0004,
"step": 2700
},
{
"epoch": 6.187214611872146,
"grad_norm": 0.03170336037874222,
"learning_rate": 1.1344178082191781e-05,
"loss": 0.0004,
"step": 2710
},
{
"epoch": 6.210045662100456,
"grad_norm": 0.01595723256468773,
"learning_rate": 1.1201484018264841e-05,
"loss": 0.0006,
"step": 2720
},
{
"epoch": 6.232876712328767,
"grad_norm": 0.05525317043066025,
"learning_rate": 1.10587899543379e-05,
"loss": 0.0004,
"step": 2730
},
{
"epoch": 6.255707762557078,
"grad_norm": 0.00609338004142046,
"learning_rate": 1.091609589041096e-05,
"loss": 0.0004,
"step": 2740
},
{
"epoch": 6.2785388127853885,
"grad_norm": 0.061812903732061386,
"learning_rate": 1.0773401826484018e-05,
"loss": 0.0004,
"step": 2750
},
{
"epoch": 6.301369863013699,
"grad_norm": 0.004055253695696592,
"learning_rate": 1.0630707762557078e-05,
"loss": 0.0003,
"step": 2760
},
{
"epoch": 6.324200913242009,
"grad_norm": 0.010857106186449528,
"learning_rate": 1.0488013698630138e-05,
"loss": 0.0004,
"step": 2770
},
{
"epoch": 6.34703196347032,
"grad_norm": 0.008729905821383,
"learning_rate": 1.0345319634703198e-05,
"loss": 0.0003,
"step": 2780
},
{
"epoch": 6.36986301369863,
"grad_norm": 0.003697366453707218,
"learning_rate": 1.0202625570776256e-05,
"loss": 0.0003,
"step": 2790
},
{
"epoch": 6.392694063926941,
"grad_norm": 0.0052353376522660255,
"learning_rate": 1.0059931506849316e-05,
"loss": 0.0003,
"step": 2800
},
{
"epoch": 6.415525114155251,
"grad_norm": 0.06486661732196808,
"learning_rate": 9.917237442922375e-06,
"loss": 0.0003,
"step": 2810
},
{
"epoch": 6.438356164383562,
"grad_norm": 0.036493659019470215,
"learning_rate": 9.774543378995435e-06,
"loss": 0.0004,
"step": 2820
},
{
"epoch": 6.461187214611872,
"grad_norm": 0.010287893004715443,
"learning_rate": 9.631849315068493e-06,
"loss": 0.0005,
"step": 2830
},
{
"epoch": 6.4840182648401825,
"grad_norm": 0.04603143036365509,
"learning_rate": 9.489155251141553e-06,
"loss": 0.0005,
"step": 2840
},
{
"epoch": 6.506849315068493,
"grad_norm": 0.016138222068548203,
"learning_rate": 9.346461187214613e-06,
"loss": 0.0004,
"step": 2850
},
{
"epoch": 6.529680365296803,
"grad_norm": 0.018696075305342674,
"learning_rate": 9.203767123287671e-06,
"loss": 0.0003,
"step": 2860
},
{
"epoch": 6.552511415525114,
"grad_norm": 0.009262233041226864,
"learning_rate": 9.061073059360731e-06,
"loss": 0.0003,
"step": 2870
},
{
"epoch": 6.575342465753424,
"grad_norm": 0.005299023352563381,
"learning_rate": 8.918378995433791e-06,
"loss": 0.0003,
"step": 2880
},
{
"epoch": 6.598173515981735,
"grad_norm": 0.00924585945904255,
"learning_rate": 8.77568493150685e-06,
"loss": 0.0005,
"step": 2890
},
{
"epoch": 6.621004566210045,
"grad_norm": 0.023650696501135826,
"learning_rate": 8.632990867579908e-06,
"loss": 0.0003,
"step": 2900
},
{
"epoch": 6.6438356164383565,
"grad_norm": 0.005081293638795614,
"learning_rate": 8.490296803652968e-06,
"loss": 0.0003,
"step": 2910
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.012082752771675587,
"learning_rate": 8.347602739726028e-06,
"loss": 0.0003,
"step": 2920
},
{
"epoch": 6.689497716894977,
"grad_norm": 0.004923298954963684,
"learning_rate": 8.204908675799088e-06,
"loss": 0.0004,
"step": 2930
},
{
"epoch": 6.712328767123288,
"grad_norm": 0.012247243896126747,
"learning_rate": 8.062214611872146e-06,
"loss": 0.0003,
"step": 2940
},
{
"epoch": 6.735159817351598,
"grad_norm": 0.003992004320025444,
"learning_rate": 7.919520547945206e-06,
"loss": 0.0004,
"step": 2950
},
{
"epoch": 6.757990867579909,
"grad_norm": 0.11996075510978699,
"learning_rate": 7.776826484018266e-06,
"loss": 0.0004,
"step": 2960
},
{
"epoch": 6.780821917808219,
"grad_norm": 0.005172852426767349,
"learning_rate": 7.634132420091324e-06,
"loss": 0.0003,
"step": 2970
},
{
"epoch": 6.80365296803653,
"grad_norm": 0.006295430473983288,
"learning_rate": 7.4914383561643835e-06,
"loss": 0.0004,
"step": 2980
},
{
"epoch": 6.82648401826484,
"grad_norm": 0.004225455224514008,
"learning_rate": 7.3487442922374434e-06,
"loss": 0.0003,
"step": 2990
},
{
"epoch": 6.8493150684931505,
"grad_norm": 0.0032389916013926268,
"learning_rate": 7.206050228310503e-06,
"loss": 0.0003,
"step": 3000
},
{
"epoch": 6.872146118721461,
"grad_norm": 0.038855355232954025,
"learning_rate": 7.063356164383563e-06,
"loss": 0.0003,
"step": 3010
},
{
"epoch": 6.894977168949771,
"grad_norm": 0.003689020639285445,
"learning_rate": 6.920662100456621e-06,
"loss": 0.0004,
"step": 3020
},
{
"epoch": 6.917808219178082,
"grad_norm": 0.01840381883084774,
"learning_rate": 6.777968036529681e-06,
"loss": 0.0003,
"step": 3030
},
{
"epoch": 6.940639269406392,
"grad_norm": 0.00360796507447958,
"learning_rate": 6.63527397260274e-06,
"loss": 0.0003,
"step": 3040
},
{
"epoch": 6.963470319634704,
"grad_norm": 0.01734967716038227,
"learning_rate": 6.4925799086758e-06,
"loss": 0.0003,
"step": 3050
},
{
"epoch": 6.986301369863014,
"grad_norm": 0.03626614063978195,
"learning_rate": 6.349885844748858e-06,
"loss": 0.0003,
"step": 3060
},
{
"epoch": 7.0,
"eval_loss": 2.8402284897310892e-06,
"eval_runtime": 70.9798,
"eval_samples_per_second": 42.266,
"eval_steps_per_second": 2.649,
"step": 3066
},
{
"epoch": 7.0091324200913245,
"grad_norm": 0.029538586735725403,
"learning_rate": 6.207191780821918e-06,
"loss": 0.0003,
"step": 3070
},
{
"epoch": 7.031963470319635,
"grad_norm": 0.011442271992564201,
"learning_rate": 6.0644977168949774e-06,
"loss": 0.0003,
"step": 3080
},
{
"epoch": 7.054794520547945,
"grad_norm": 0.009785550646483898,
"learning_rate": 5.921803652968037e-06,
"loss": 0.0003,
"step": 3090
},
{
"epoch": 7.077625570776256,
"grad_norm": 0.048744283616542816,
"learning_rate": 5.779109589041097e-06,
"loss": 0.0006,
"step": 3100
},
{
"epoch": 7.100456621004566,
"grad_norm": 0.007315410766750574,
"learning_rate": 5.636415525114156e-06,
"loss": 0.0002,
"step": 3110
},
{
"epoch": 7.123287671232877,
"grad_norm": 0.007827061228454113,
"learning_rate": 5.493721461187215e-06,
"loss": 0.0004,
"step": 3120
},
{
"epoch": 7.146118721461187,
"grad_norm": 0.0036791411694139242,
"learning_rate": 5.351027397260274e-06,
"loss": 0.0003,
"step": 3130
},
{
"epoch": 7.168949771689498,
"grad_norm": 0.010082785040140152,
"learning_rate": 5.208333333333334e-06,
"loss": 0.0003,
"step": 3140
},
{
"epoch": 7.191780821917808,
"grad_norm": 0.008839433081448078,
"learning_rate": 5.065639269406393e-06,
"loss": 0.0003,
"step": 3150
},
{
"epoch": 7.2146118721461185,
"grad_norm": 0.009164104238152504,
"learning_rate": 4.922945205479452e-06,
"loss": 0.0003,
"step": 3160
},
{
"epoch": 7.237442922374429,
"grad_norm": 0.06758883595466614,
"learning_rate": 4.7802511415525114e-06,
"loss": 0.0003,
"step": 3170
},
{
"epoch": 7.260273972602739,
"grad_norm": 0.0034141126088798046,
"learning_rate": 4.6375570776255714e-06,
"loss": 0.0004,
"step": 3180
},
{
"epoch": 7.28310502283105,
"grad_norm": 0.009116005152463913,
"learning_rate": 4.494863013698631e-06,
"loss": 0.0003,
"step": 3190
},
{
"epoch": 7.30593607305936,
"grad_norm": 0.014750838279724121,
"learning_rate": 4.35216894977169e-06,
"loss": 0.0003,
"step": 3200
},
{
"epoch": 7.328767123287671,
"grad_norm": 0.013239488005638123,
"learning_rate": 4.209474885844749e-06,
"loss": 0.0003,
"step": 3210
},
{
"epoch": 7.351598173515982,
"grad_norm": 0.009504084475338459,
"learning_rate": 4.066780821917809e-06,
"loss": 0.0004,
"step": 3220
},
{
"epoch": 7.3744292237442925,
"grad_norm": 0.018879897892475128,
"learning_rate": 3.924086757990868e-06,
"loss": 0.0004,
"step": 3230
},
{
"epoch": 7.397260273972603,
"grad_norm": 0.01805570162832737,
"learning_rate": 3.7813926940639276e-06,
"loss": 0.0003,
"step": 3240
},
{
"epoch": 7.420091324200913,
"grad_norm": 0.08645830303430557,
"learning_rate": 3.6386986301369863e-06,
"loss": 0.0005,
"step": 3250
},
{
"epoch": 7.442922374429224,
"grad_norm": 0.01047169417142868,
"learning_rate": 3.4960045662100463e-06,
"loss": 0.0003,
"step": 3260
},
{
"epoch": 7.465753424657534,
"grad_norm": 0.00378281413577497,
"learning_rate": 3.353310502283105e-06,
"loss": 0.0003,
"step": 3270
},
{
"epoch": 7.488584474885845,
"grad_norm": 0.008809005841612816,
"learning_rate": 3.210616438356165e-06,
"loss": 0.0003,
"step": 3280
},
{
"epoch": 7.511415525114155,
"grad_norm": 0.006725851446390152,
"learning_rate": 3.0679223744292237e-06,
"loss": 0.0003,
"step": 3290
},
{
"epoch": 7.534246575342466,
"grad_norm": 0.011108343489468098,
"learning_rate": 2.9252283105022833e-06,
"loss": 0.0003,
"step": 3300
},
{
"epoch": 7.557077625570776,
"grad_norm": 0.006323591805994511,
"learning_rate": 2.7825342465753424e-06,
"loss": 0.0003,
"step": 3310
},
{
"epoch": 7.579908675799087,
"grad_norm": 0.01762283407151699,
"learning_rate": 2.639840182648402e-06,
"loss": 0.0003,
"step": 3320
},
{
"epoch": 7.602739726027397,
"grad_norm": 0.003676204476505518,
"learning_rate": 2.497146118721461e-06,
"loss": 0.0002,
"step": 3330
},
{
"epoch": 7.6255707762557075,
"grad_norm": 0.04348291829228401,
"learning_rate": 2.3544520547945207e-06,
"loss": 0.0003,
"step": 3340
},
{
"epoch": 7.648401826484018,
"grad_norm": 0.01973540708422661,
"learning_rate": 2.21175799086758e-06,
"loss": 0.0004,
"step": 3350
},
{
"epoch": 7.671232876712329,
"grad_norm": 0.0020602825097739697,
"learning_rate": 2.0690639269406394e-06,
"loss": 0.0003,
"step": 3360
},
{
"epoch": 7.69406392694064,
"grad_norm": 0.031328316777944565,
"learning_rate": 1.9263698630136986e-06,
"loss": 0.0003,
"step": 3370
},
{
"epoch": 7.71689497716895,
"grad_norm": 0.004346560686826706,
"learning_rate": 1.7836757990867581e-06,
"loss": 0.0003,
"step": 3380
},
{
"epoch": 7.739726027397261,
"grad_norm": 0.020926913246512413,
"learning_rate": 1.6409817351598175e-06,
"loss": 0.0003,
"step": 3390
},
{
"epoch": 7.762557077625571,
"grad_norm": 0.012918849475681782,
"learning_rate": 1.4982876712328766e-06,
"loss": 0.0005,
"step": 3400
},
{
"epoch": 7.7853881278538815,
"grad_norm": 0.03088468685746193,
"learning_rate": 1.355593607305936e-06,
"loss": 0.0004,
"step": 3410
},
{
"epoch": 7.808219178082192,
"grad_norm": 0.02280372381210327,
"learning_rate": 1.2128995433789954e-06,
"loss": 0.0006,
"step": 3420
},
{
"epoch": 7.831050228310502,
"grad_norm": 0.0038483564276248217,
"learning_rate": 1.0702054794520547e-06,
"loss": 0.0003,
"step": 3430
},
{
"epoch": 7.853881278538813,
"grad_norm": 0.026072148233652115,
"learning_rate": 9.275114155251142e-07,
"loss": 0.0004,
"step": 3440
},
{
"epoch": 7.876712328767123,
"grad_norm": 0.05634905397891998,
"learning_rate": 7.848173515981735e-07,
"loss": 0.0003,
"step": 3450
},
{
"epoch": 7.899543378995434,
"grad_norm": 0.003738977015018463,
"learning_rate": 6.421232876712329e-07,
"loss": 0.0003,
"step": 3460
},
{
"epoch": 7.922374429223744,
"grad_norm": 0.00907827913761139,
"learning_rate": 4.994292237442923e-07,
"loss": 0.0003,
"step": 3470
},
{
"epoch": 7.945205479452055,
"grad_norm": 0.02188229374587536,
"learning_rate": 3.567351598173516e-07,
"loss": 0.0003,
"step": 3480
},
{
"epoch": 7.968036529680365,
"grad_norm": 0.020626889541745186,
"learning_rate": 2.1404109589041096e-07,
"loss": 0.0003,
"step": 3490
},
{
"epoch": 7.9908675799086755,
"grad_norm": 0.010182246565818787,
"learning_rate": 7.134703196347032e-08,
"loss": 0.0004,
"step": 3500
},
{
"epoch": 8.0,
"eval_loss": 2.446558710289537e-06,
"eval_runtime": 70.9518,
"eval_samples_per_second": 42.282,
"eval_steps_per_second": 2.65,
"step": 3504
}
],
"logging_steps": 10,
"max_steps": 3504,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8742142387077120.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}