{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9839064716528885,
"eval_steps": 500,
"global_step": 30500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009783299907058651,
"grad_norm": 33.780609130859375,
"learning_rate": 1.6302575806977503e-06,
"loss": 6.574,
"step": 100
},
{
"epoch": 0.019566599814117302,
"grad_norm": 33.439022064208984,
"learning_rate": 3.2605151613955006e-06,
"loss": 6.1653,
"step": 200
},
{
"epoch": 0.02934989972117595,
"grad_norm": 27.25751304626465,
"learning_rate": 4.890772742093251e-06,
"loss": 5.5515,
"step": 300
},
{
"epoch": 0.039133199628234604,
"grad_norm": 38.36979675292969,
"learning_rate": 6.521030322791001e-06,
"loss": 5.0531,
"step": 400
},
{
"epoch": 0.048916499535293256,
"grad_norm": 29.350488662719727,
"learning_rate": 8.15128790348875e-06,
"loss": 4.9225,
"step": 500
},
{
"epoch": 0.048916499535293256,
"eval_runtime": 181.5812,
"eval_samples_per_second": 112.578,
"eval_steps_per_second": 14.076,
"step": 500
},
{
"epoch": 0.0586997994423519,
"grad_norm": 33.02122497558594,
"learning_rate": 9.781545484186502e-06,
"loss": 4.8186,
"step": 600
},
{
"epoch": 0.06848309934941056,
"grad_norm": 42.41593933105469,
"learning_rate": 1.1411803064884251e-05,
"loss": 4.5769,
"step": 700
},
{
"epoch": 0.07826639925646921,
"grad_norm": 40.29044723510742,
"learning_rate": 1.3042060645582003e-05,
"loss": 4.3963,
"step": 800
},
{
"epoch": 0.08804969916352785,
"grad_norm": 38.0811653137207,
"learning_rate": 1.4672318226279752e-05,
"loss": 4.3393,
"step": 900
},
{
"epoch": 0.09783299907058651,
"grad_norm": 36.08370590209961,
"learning_rate": 1.63025758069775e-05,
"loss": 4.2421,
"step": 1000
},
{
"epoch": 0.09783299907058651,
"eval_runtime": 181.886,
"eval_samples_per_second": 112.389,
"eval_steps_per_second": 14.053,
"step": 1000
},
{
"epoch": 0.10761629897764516,
"grad_norm": 37.253684997558594,
"learning_rate": 1.7932833387675256e-05,
"loss": 4.1156,
"step": 1100
},
{
"epoch": 0.1173995988847038,
"grad_norm": 33.003475189208984,
"learning_rate": 1.9563090968373004e-05,
"loss": 4.0112,
"step": 1200
},
{
"epoch": 0.12718289879176245,
"grad_norm": 30.727867126464844,
"learning_rate": 2.1193348549070755e-05,
"loss": 3.9969,
"step": 1300
},
{
"epoch": 0.13696619869882112,
"grad_norm": 37.471092224121094,
"learning_rate": 2.2823606129768503e-05,
"loss": 3.874,
"step": 1400
},
{
"epoch": 0.14674949860587977,
"grad_norm": 42.32167434692383,
"learning_rate": 2.4453863710466254e-05,
"loss": 3.8518,
"step": 1500
},
{
"epoch": 0.14674949860587977,
"eval_runtime": 181.9332,
"eval_samples_per_second": 112.36,
"eval_steps_per_second": 14.049,
"step": 1500
},
{
"epoch": 0.15653279851293841,
"grad_norm": 38.00124740600586,
"learning_rate": 2.6084121291164005e-05,
"loss": 3.918,
"step": 1600
},
{
"epoch": 0.16631609841999706,
"grad_norm": 44.637386322021484,
"learning_rate": 2.7714378871861756e-05,
"loss": 3.9134,
"step": 1700
},
{
"epoch": 0.1760993983270557,
"grad_norm": 49.578609466552734,
"learning_rate": 2.9344636452559504e-05,
"loss": 3.7507,
"step": 1800
},
{
"epoch": 0.18588269823411438,
"grad_norm": 36.65715789794922,
"learning_rate": 3.0974894033257255e-05,
"loss": 3.7551,
"step": 1900
},
{
"epoch": 0.19566599814117303,
"grad_norm": 36.873443603515625,
"learning_rate": 3.2605151613955e-05,
"loss": 3.6951,
"step": 2000
},
{
"epoch": 0.19566599814117303,
"eval_runtime": 181.8273,
"eval_samples_per_second": 112.425,
"eval_steps_per_second": 14.057,
"step": 2000
},
{
"epoch": 0.20544929804823167,
"grad_norm": 33.025413513183594,
"learning_rate": 3.423540919465276e-05,
"loss": 3.6603,
"step": 2100
},
{
"epoch": 0.21523259795529032,
"grad_norm": 30.105051040649414,
"learning_rate": 3.586566677535051e-05,
"loss": 3.525,
"step": 2200
},
{
"epoch": 0.22501589786234896,
"grad_norm": 34.5129280090332,
"learning_rate": 3.749592435604825e-05,
"loss": 3.6454,
"step": 2300
},
{
"epoch": 0.2347991977694076,
"grad_norm": 33.16934585571289,
"learning_rate": 3.912618193674601e-05,
"loss": 3.6356,
"step": 2400
},
{
"epoch": 0.24458249767646628,
"grad_norm": 33.5789794921875,
"learning_rate": 4.0756439517443756e-05,
"loss": 3.5605,
"step": 2500
},
{
"epoch": 0.24458249767646628,
"eval_runtime": 181.7254,
"eval_samples_per_second": 112.488,
"eval_steps_per_second": 14.065,
"step": 2500
},
{
"epoch": 0.2543657975835249,
"grad_norm": 34.30876159667969,
"learning_rate": 4.238669709814151e-05,
"loss": 3.5447,
"step": 2600
},
{
"epoch": 0.2641490974905836,
"grad_norm": 29.907989501953125,
"learning_rate": 4.401695467883926e-05,
"loss": 3.5116,
"step": 2700
},
{
"epoch": 0.27393239739764225,
"grad_norm": 34.08231735229492,
"learning_rate": 4.5647212259537006e-05,
"loss": 3.4941,
"step": 2800
},
{
"epoch": 0.28371569730470086,
"grad_norm": 25.034149169921875,
"learning_rate": 4.727746984023476e-05,
"loss": 3.4863,
"step": 2900
},
{
"epoch": 0.29349899721175954,
"grad_norm": 32.21685028076172,
"learning_rate": 4.890772742093251e-05,
"loss": 3.5096,
"step": 3000
},
{
"epoch": 0.29349899721175954,
"eval_runtime": 181.6612,
"eval_samples_per_second": 112.528,
"eval_steps_per_second": 14.07,
"step": 3000
},
{
"epoch": 0.30328229711881816,
"grad_norm": 24.290380477905273,
"learning_rate": 4.9940208725902305e-05,
"loss": 3.3867,
"step": 3100
},
{
"epoch": 0.31306559702587683,
"grad_norm": 22.924575805664062,
"learning_rate": 4.975902304681838e-05,
"loss": 3.398,
"step": 3200
},
{
"epoch": 0.3228488969329355,
"grad_norm": 19.540430068969727,
"learning_rate": 4.957783736773446e-05,
"loss": 3.3727,
"step": 3300
},
{
"epoch": 0.3326321968399941,
"grad_norm": 22.529376983642578,
"learning_rate": 4.939665168865053e-05,
"loss": 3.3364,
"step": 3400
},
{
"epoch": 0.3424154967470528,
"grad_norm": 20.821264266967773,
"learning_rate": 4.921546600956661e-05,
"loss": 3.3126,
"step": 3500
},
{
"epoch": 0.3424154967470528,
"eval_runtime": 181.7582,
"eval_samples_per_second": 112.468,
"eval_steps_per_second": 14.063,
"step": 3500
},
{
"epoch": 0.3521987966541114,
"grad_norm": 24.346153259277344,
"learning_rate": 4.903428033048268e-05,
"loss": 3.2678,
"step": 3600
},
{
"epoch": 0.3619820965611701,
"grad_norm": 19.89035415649414,
"learning_rate": 4.8853094651398754e-05,
"loss": 3.3233,
"step": 3700
},
{
"epoch": 0.37176539646822876,
"grad_norm": 17.938880920410156,
"learning_rate": 4.8671908972314825e-05,
"loss": 3.2822,
"step": 3800
},
{
"epoch": 0.3815486963752874,
"grad_norm": 16.92071533203125,
"learning_rate": 4.84907232932309e-05,
"loss": 3.2254,
"step": 3900
},
{
"epoch": 0.39133199628234605,
"grad_norm": 18.241249084472656,
"learning_rate": 4.830953761414698e-05,
"loss": 3.2116,
"step": 4000
},
{
"epoch": 0.39133199628234605,
"eval_runtime": 182.8906,
"eval_samples_per_second": 111.772,
"eval_steps_per_second": 13.976,
"step": 4000
},
{
"epoch": 0.40111529618940467,
"grad_norm": 17.56020736694336,
"learning_rate": 4.812835193506305e-05,
"loss": 3.2232,
"step": 4100
},
{
"epoch": 0.41089859609646334,
"grad_norm": 17.81117057800293,
"learning_rate": 4.794716625597913e-05,
"loss": 3.1936,
"step": 4200
},
{
"epoch": 0.420681896003522,
"grad_norm": 19.89581871032715,
"learning_rate": 4.77659805768952e-05,
"loss": 3.1443,
"step": 4300
},
{
"epoch": 0.43046519591058063,
"grad_norm": 22.968582153320312,
"learning_rate": 4.758479489781128e-05,
"loss": 3.2084,
"step": 4400
},
{
"epoch": 0.4402484958176393,
"grad_norm": 17.119598388671875,
"learning_rate": 4.740360921872735e-05,
"loss": 3.1263,
"step": 4500
},
{
"epoch": 0.4402484958176393,
"eval_runtime": 182.3246,
"eval_samples_per_second": 112.119,
"eval_steps_per_second": 14.019,
"step": 4500
},
{
"epoch": 0.4500317957246979,
"grad_norm": 19.294527053833008,
"learning_rate": 4.722242353964343e-05,
"loss": 3.1327,
"step": 4600
},
{
"epoch": 0.4598150956317566,
"grad_norm": 16.941057205200195,
"learning_rate": 4.704123786055951e-05,
"loss": 3.0944,
"step": 4700
},
{
"epoch": 0.4695983955388152,
"grad_norm": 22.43411636352539,
"learning_rate": 4.686005218147558e-05,
"loss": 3.1093,
"step": 4800
},
{
"epoch": 0.4793816954458739,
"grad_norm": 19.64097023010254,
"learning_rate": 4.667886650239166e-05,
"loss": 3.0597,
"step": 4900
},
{
"epoch": 0.48916499535293256,
"grad_norm": 19.343788146972656,
"learning_rate": 4.649768082330773e-05,
"loss": 3.1659,
"step": 5000
},
{
"epoch": 0.48916499535293256,
"eval_runtime": 181.8771,
"eval_samples_per_second": 112.395,
"eval_steps_per_second": 14.053,
"step": 5000
},
{
"epoch": 0.4989482952599912,
"grad_norm": 19.657760620117188,
"learning_rate": 4.63164951442238e-05,
"loss": 3.0506,
"step": 5100
},
{
"epoch": 0.5087315951670498,
"grad_norm": 16.2425537109375,
"learning_rate": 4.613530946513987e-05,
"loss": 3.0524,
"step": 5200
},
{
"epoch": 0.5185148950741085,
"grad_norm": 19.64779281616211,
"learning_rate": 4.595412378605595e-05,
"loss": 2.9995,
"step": 5300
},
{
"epoch": 0.5282981949811671,
"grad_norm": 17.29520606994629,
"learning_rate": 4.577293810697203e-05,
"loss": 3.0932,
"step": 5400
},
{
"epoch": 0.5380814948882258,
"grad_norm": 17.694602966308594,
"learning_rate": 4.55917524278881e-05,
"loss": 3.0309,
"step": 5500
},
{
"epoch": 0.5380814948882258,
"eval_runtime": 181.7231,
"eval_samples_per_second": 112.49,
"eval_steps_per_second": 14.065,
"step": 5500
},
{
"epoch": 0.5478647947952845,
"grad_norm": 21.030174255371094,
"learning_rate": 4.541056674880418e-05,
"loss": 3.0313,
"step": 5600
},
{
"epoch": 0.5576480947023431,
"grad_norm": 12.339129447937012,
"learning_rate": 4.522938106972025e-05,
"loss": 3.047,
"step": 5700
},
{
"epoch": 0.5674313946094017,
"grad_norm": 16.496389389038086,
"learning_rate": 4.504819539063633e-05,
"loss": 2.9961,
"step": 5800
},
{
"epoch": 0.5772146945164603,
"grad_norm": 15.456297874450684,
"learning_rate": 4.48670097115524e-05,
"loss": 2.9821,
"step": 5900
},
{
"epoch": 0.5869979944235191,
"grad_norm": 17.8603572845459,
"learning_rate": 4.468582403246848e-05,
"loss": 2.9294,
"step": 6000
},
{
"epoch": 0.5869979944235191,
"eval_runtime": 181.8258,
"eval_samples_per_second": 112.426,
"eval_steps_per_second": 14.057,
"step": 6000
},
{
"epoch": 0.5967812943305777,
"grad_norm": 18.85349464416504,
"learning_rate": 4.450463835338455e-05,
"loss": 2.9929,
"step": 6100
},
{
"epoch": 0.6065645942376363,
"grad_norm": 22.971813201904297,
"learning_rate": 4.432345267430063e-05,
"loss": 2.9684,
"step": 6200
},
{
"epoch": 0.616347894144695,
"grad_norm": 15.877230644226074,
"learning_rate": 4.4142266995216706e-05,
"loss": 2.9399,
"step": 6300
},
{
"epoch": 0.6261311940517537,
"grad_norm": 19.847482681274414,
"learning_rate": 4.396108131613278e-05,
"loss": 2.88,
"step": 6400
},
{
"epoch": 0.6359144939588123,
"grad_norm": 15.004170417785645,
"learning_rate": 4.377989563704885e-05,
"loss": 2.9719,
"step": 6500
},
{
"epoch": 0.6359144939588123,
"eval_runtime": 182.6045,
"eval_samples_per_second": 111.947,
"eval_steps_per_second": 13.997,
"step": 6500
},
{
"epoch": 0.645697793865871,
"grad_norm": 19.473665237426758,
"learning_rate": 4.359870995796492e-05,
"loss": 2.9246,
"step": 6600
},
{
"epoch": 0.6554810937729296,
"grad_norm": 18.071683883666992,
"learning_rate": 4.3417524278881e-05,
"loss": 2.9031,
"step": 6700
},
{
"epoch": 0.6652643936799882,
"grad_norm": 17.544504165649414,
"learning_rate": 4.323633859979707e-05,
"loss": 2.8313,
"step": 6800
},
{
"epoch": 0.6750476935870469,
"grad_norm": 18.936140060424805,
"learning_rate": 4.305515292071315e-05,
"loss": 2.8536,
"step": 6900
},
{
"epoch": 0.6848309934941056,
"grad_norm": 14.77696418762207,
"learning_rate": 4.2873967241629226e-05,
"loss": 2.9104,
"step": 7000
},
{
"epoch": 0.6848309934941056,
"eval_runtime": 181.938,
"eval_samples_per_second": 112.357,
"eval_steps_per_second": 14.049,
"step": 7000
},
{
"epoch": 0.6946142934011642,
"grad_norm": 14.303226470947266,
"learning_rate": 4.26927815625453e-05,
"loss": 2.8386,
"step": 7100
},
{
"epoch": 0.7043975933082228,
"grad_norm": 17.11782455444336,
"learning_rate": 4.2511595883461376e-05,
"loss": 2.9013,
"step": 7200
},
{
"epoch": 0.7141808932152816,
"grad_norm": 18.661100387573242,
"learning_rate": 4.233041020437745e-05,
"loss": 2.9428,
"step": 7300
},
{
"epoch": 0.7239641931223402,
"grad_norm": 15.535719871520996,
"learning_rate": 4.2149224525293525e-05,
"loss": 2.8582,
"step": 7400
},
{
"epoch": 0.7337474930293988,
"grad_norm": 15.3306303024292,
"learning_rate": 4.19680388462096e-05,
"loss": 2.8896,
"step": 7500
},
{
"epoch": 0.7337474930293988,
"eval_runtime": 181.8938,
"eval_samples_per_second": 112.384,
"eval_steps_per_second": 14.052,
"step": 7500
},
{
"epoch": 0.7435307929364575,
"grad_norm": 16.730344772338867,
"learning_rate": 4.1786853167125675e-05,
"loss": 2.9097,
"step": 7600
},
{
"epoch": 0.7533140928435161,
"grad_norm": 18.755483627319336,
"learning_rate": 4.1605667488041746e-05,
"loss": 2.8815,
"step": 7700
},
{
"epoch": 0.7630973927505748,
"grad_norm": 18.737581253051758,
"learning_rate": 4.1424481808957824e-05,
"loss": 2.9202,
"step": 7800
},
{
"epoch": 0.7728806926576334,
"grad_norm": 14.711681365966797,
"learning_rate": 4.1243296129873896e-05,
"loss": 2.806,
"step": 7900
},
{
"epoch": 0.7826639925646921,
"grad_norm": 17.5069580078125,
"learning_rate": 4.106211045078997e-05,
"loss": 2.8576,
"step": 8000
},
{
"epoch": 0.7826639925646921,
"eval_runtime": 181.9442,
"eval_samples_per_second": 112.353,
"eval_steps_per_second": 14.048,
"step": 8000
},
{
"epoch": 0.7924472924717507,
"grad_norm": 17.678852081298828,
"learning_rate": 4.0880924771706046e-05,
"loss": 2.8035,
"step": 8100
},
{
"epoch": 0.8022305923788093,
"grad_norm": 17.644638061523438,
"learning_rate": 4.069973909262212e-05,
"loss": 2.7958,
"step": 8200
},
{
"epoch": 0.8120138922858681,
"grad_norm": 18.377134323120117,
"learning_rate": 4.0518553413538195e-05,
"loss": 2.8055,
"step": 8300
},
{
"epoch": 0.8217971921929267,
"grad_norm": 18.026033401489258,
"learning_rate": 4.0337367734454273e-05,
"loss": 2.7334,
"step": 8400
},
{
"epoch": 0.8315804920999853,
"grad_norm": 14.77315616607666,
"learning_rate": 4.0156182055370345e-05,
"loss": 2.8082,
"step": 8500
},
{
"epoch": 0.8315804920999853,
"eval_runtime": 182.4176,
"eval_samples_per_second": 112.062,
"eval_steps_per_second": 14.012,
"step": 8500
},
{
"epoch": 0.841363792007044,
"grad_norm": 13.729479789733887,
"learning_rate": 3.997499637628642e-05,
"loss": 2.7939,
"step": 8600
},
{
"epoch": 0.8511470919141026,
"grad_norm": 16.34333610534668,
"learning_rate": 3.9793810697202494e-05,
"loss": 2.8517,
"step": 8700
},
{
"epoch": 0.8609303918211613,
"grad_norm": 22.484411239624023,
"learning_rate": 3.961262501811857e-05,
"loss": 2.776,
"step": 8800
},
{
"epoch": 0.8707136917282199,
"grad_norm": 15.922870635986328,
"learning_rate": 3.9431439339034644e-05,
"loss": 2.7909,
"step": 8900
},
{
"epoch": 0.8804969916352786,
"grad_norm": 15.06955623626709,
"learning_rate": 3.925025365995072e-05,
"loss": 2.8416,
"step": 9000
},
{
"epoch": 0.8804969916352786,
"eval_runtime": 181.9314,
"eval_samples_per_second": 112.361,
"eval_steps_per_second": 14.049,
"step": 9000
},
{
"epoch": 0.8902802915423372,
"grad_norm": 16.060428619384766,
"learning_rate": 3.9069067980866794e-05,
"loss": 2.7803,
"step": 9100
},
{
"epoch": 0.9000635914493959,
"grad_norm": 16.80124855041504,
"learning_rate": 3.888788230178287e-05,
"loss": 2.7548,
"step": 9200
},
{
"epoch": 0.9098468913564546,
"grad_norm": 16.608434677124023,
"learning_rate": 3.870669662269894e-05,
"loss": 2.8606,
"step": 9300
},
{
"epoch": 0.9196301912635132,
"grad_norm": 14.83870792388916,
"learning_rate": 3.8525510943615015e-05,
"loss": 2.7833,
"step": 9400
},
{
"epoch": 0.9294134911705718,
"grad_norm": 25.778181076049805,
"learning_rate": 3.834432526453109e-05,
"loss": 2.7434,
"step": 9500
},
{
"epoch": 0.9294134911705718,
"eval_runtime": 181.99,
"eval_samples_per_second": 112.325,
"eval_steps_per_second": 14.045,
"step": 9500
},
{
"epoch": 0.9391967910776304,
"grad_norm": 17.374011993408203,
"learning_rate": 3.8163139585447164e-05,
"loss": 2.7258,
"step": 9600
},
{
"epoch": 0.9489800909846892,
"grad_norm": 17.551128387451172,
"learning_rate": 3.798195390636324e-05,
"loss": 2.824,
"step": 9700
},
{
"epoch": 0.9587633908917478,
"grad_norm": 14.35797119140625,
"learning_rate": 3.7800768227279314e-05,
"loss": 2.745,
"step": 9800
},
{
"epoch": 0.9685466907988064,
"grad_norm": 20.098552703857422,
"learning_rate": 3.761958254819539e-05,
"loss": 2.7025,
"step": 9900
},
{
"epoch": 0.9783299907058651,
"grad_norm": 16.218109130859375,
"learning_rate": 3.743839686911147e-05,
"loss": 2.8093,
"step": 10000
},
{
"epoch": 0.9783299907058651,
"eval_runtime": 181.8987,
"eval_samples_per_second": 112.381,
"eval_steps_per_second": 14.052,
"step": 10000
},
{
"epoch": 0.9881132906129237,
"grad_norm": 17.198423385620117,
"learning_rate": 3.725721119002754e-05,
"loss": 2.7124,
"step": 10100
},
{
"epoch": 0.9978965905199824,
"grad_norm": 18.021198272705078,
"learning_rate": 3.707602551094362e-05,
"loss": 2.6922,
"step": 10200
},
{
"epoch": 1.007679890427041,
"grad_norm": 15.27678108215332,
"learning_rate": 3.689483983185969e-05,
"loss": 2.6743,
"step": 10300
},
{
"epoch": 1.0174631903340996,
"grad_norm": 16.770511627197266,
"learning_rate": 3.671365415277577e-05,
"loss": 2.857,
"step": 10400
},
{
"epoch": 1.0272464902411584,
"grad_norm": 18.810932159423828,
"learning_rate": 3.653246847369184e-05,
"loss": 2.7269,
"step": 10500
},
{
"epoch": 1.0272464902411584,
"eval_runtime": 181.8537,
"eval_samples_per_second": 112.409,
"eval_steps_per_second": 14.055,
"step": 10500
},
{
"epoch": 1.037029790148217,
"grad_norm": 18.56201171875,
"learning_rate": 3.635128279460791e-05,
"loss": 2.7325,
"step": 10600
},
{
"epoch": 1.0468130900552757,
"grad_norm": 15.063011169433594,
"learning_rate": 3.617009711552399e-05,
"loss": 2.7827,
"step": 10700
},
{
"epoch": 1.0565963899623343,
"grad_norm": 15.339439392089844,
"learning_rate": 3.598891143644006e-05,
"loss": 2.7472,
"step": 10800
},
{
"epoch": 1.066379689869393,
"grad_norm": 17.466033935546875,
"learning_rate": 3.580772575735614e-05,
"loss": 2.7859,
"step": 10900
},
{
"epoch": 1.0761629897764515,
"grad_norm": 20.727872848510742,
"learning_rate": 3.562654007827221e-05,
"loss": 2.7278,
"step": 11000
},
{
"epoch": 1.0761629897764515,
"eval_runtime": 181.8566,
"eval_samples_per_second": 112.407,
"eval_steps_per_second": 14.055,
"step": 11000
},
{
"epoch": 1.0859462896835101,
"grad_norm": 16.02055549621582,
"learning_rate": 3.544535439918829e-05,
"loss": 2.6307,
"step": 11100
},
{
"epoch": 1.095729589590569,
"grad_norm": 20.069686889648438,
"learning_rate": 3.526416872010436e-05,
"loss": 2.711,
"step": 11200
},
{
"epoch": 1.1055128894976276,
"grad_norm": 14.833261489868164,
"learning_rate": 3.508298304102044e-05,
"loss": 2.6141,
"step": 11300
},
{
"epoch": 1.1152961894046862,
"grad_norm": 14.86436653137207,
"learning_rate": 3.490179736193652e-05,
"loss": 2.6816,
"step": 11400
},
{
"epoch": 1.1250794893117448,
"grad_norm": 17.955862045288086,
"learning_rate": 3.472061168285259e-05,
"loss": 2.6924,
"step": 11500
},
{
"epoch": 1.1250794893117448,
"eval_runtime": 181.8085,
"eval_samples_per_second": 112.437,
"eval_steps_per_second": 14.059,
"step": 11500
},
{
"epoch": 1.1348627892188035,
"grad_norm": 18.360109329223633,
"learning_rate": 3.453942600376867e-05,
"loss": 2.6181,
"step": 11600
},
{
"epoch": 1.144646089125862,
"grad_norm": 17.547542572021484,
"learning_rate": 3.435824032468474e-05,
"loss": 2.6394,
"step": 11700
},
{
"epoch": 1.154429389032921,
"grad_norm": 12.194833755493164,
"learning_rate": 3.417705464560082e-05,
"loss": 2.6684,
"step": 11800
},
{
"epoch": 1.1642126889399795,
"grad_norm": 17.095104217529297,
"learning_rate": 3.399586896651689e-05,
"loss": 2.6129,
"step": 11900
},
{
"epoch": 1.1739959888470382,
"grad_norm": 20.788406372070312,
"learning_rate": 3.381468328743296e-05,
"loss": 2.5663,
"step": 12000
},
{
"epoch": 1.1739959888470382,
"eval_runtime": 181.8035,
"eval_samples_per_second": 112.44,
"eval_steps_per_second": 14.059,
"step": 12000
},
{
"epoch": 1.1837792887540968,
"grad_norm": 14.261167526245117,
"learning_rate": 3.363349760834904e-05,
"loss": 2.6544,
"step": 12100
},
{
"epoch": 1.1935625886611554,
"grad_norm": 24.68012046813965,
"learning_rate": 3.345231192926511e-05,
"loss": 2.6632,
"step": 12200
},
{
"epoch": 1.203345888568214,
"grad_norm": 16.10886573791504,
"learning_rate": 3.327112625018119e-05,
"loss": 2.6366,
"step": 12300
},
{
"epoch": 1.2131291884752726,
"grad_norm": 18.038848876953125,
"learning_rate": 3.308994057109726e-05,
"loss": 2.6563,
"step": 12400
},
{
"epoch": 1.2229124883823315,
"grad_norm": 17.40920639038086,
"learning_rate": 3.290875489201334e-05,
"loss": 2.718,
"step": 12500
},
{
"epoch": 1.2229124883823315,
"eval_runtime": 181.9491,
"eval_samples_per_second": 112.35,
"eval_steps_per_second": 14.048,
"step": 12500
},
{
"epoch": 1.23269578828939,
"grad_norm": 15.097307205200195,
"learning_rate": 3.272756921292941e-05,
"loss": 2.7282,
"step": 12600
},
{
"epoch": 1.2424790881964487,
"grad_norm": 17.63008689880371,
"learning_rate": 3.254638353384549e-05,
"loss": 2.7104,
"step": 12700
},
{
"epoch": 1.2522623881035073,
"grad_norm": 16.161130905151367,
"learning_rate": 3.236519785476156e-05,
"loss": 2.6427,
"step": 12800
},
{
"epoch": 1.262045688010566,
"grad_norm": 18.786882400512695,
"learning_rate": 3.218401217567764e-05,
"loss": 2.6105,
"step": 12900
},
{
"epoch": 1.2718289879176246,
"grad_norm": 24.145421981811523,
"learning_rate": 3.2002826496593715e-05,
"loss": 2.6322,
"step": 13000
},
{
"epoch": 1.2718289879176246,
"eval_runtime": 182.5613,
"eval_samples_per_second": 111.973,
"eval_steps_per_second": 14.001,
"step": 13000
},
{
"epoch": 1.2816122878246832,
"grad_norm": 15.286133766174316,
"learning_rate": 3.1821640817509786e-05,
"loss": 2.6465,
"step": 13100
},
{
"epoch": 1.291395587731742,
"grad_norm": 21.22935676574707,
"learning_rate": 3.1640455138425865e-05,
"loss": 2.6691,
"step": 13200
},
{
"epoch": 1.3011788876388006,
"grad_norm": 18.064428329467773,
"learning_rate": 3.1459269459341936e-05,
"loss": 2.5904,
"step": 13300
},
{
"epoch": 1.3109621875458592,
"grad_norm": 14.45976448059082,
"learning_rate": 3.127808378025801e-05,
"loss": 2.6602,
"step": 13400
},
{
"epoch": 1.3207454874529179,
"grad_norm": 19.72386360168457,
"learning_rate": 3.109689810117408e-05,
"loss": 2.6337,
"step": 13500
},
{
"epoch": 1.3207454874529179,
"eval_runtime": 182.4053,
"eval_samples_per_second": 112.069,
"eval_steps_per_second": 14.013,
"step": 13500
},
{
"epoch": 1.3305287873599765,
"grad_norm": 17.639583587646484,
"learning_rate": 3.091571242209016e-05,
"loss": 2.6135,
"step": 13600
},
{
"epoch": 1.340312087267035,
"grad_norm": 19.71700096130371,
"learning_rate": 3.0734526743006235e-05,
"loss": 2.6252,
"step": 13700
},
{
"epoch": 1.3500953871740937,
"grad_norm": 16.715856552124023,
"learning_rate": 3.055334106392231e-05,
"loss": 2.6475,
"step": 13800
},
{
"epoch": 1.3598786870811526,
"grad_norm": 12.645075798034668,
"learning_rate": 3.0372155384838385e-05,
"loss": 2.6199,
"step": 13900
},
{
"epoch": 1.3696619869882112,
"grad_norm": 20.150625228881836,
"learning_rate": 3.0190969705754456e-05,
"loss": 2.5567,
"step": 14000
},
{
"epoch": 1.3696619869882112,
"eval_runtime": 181.9086,
"eval_samples_per_second": 112.375,
"eval_steps_per_second": 14.051,
"step": 14000
},
{
"epoch": 1.3794452868952698,
"grad_norm": 19.111286163330078,
"learning_rate": 3.0009784026670535e-05,
"loss": 2.59,
"step": 14100
},
{
"epoch": 1.3892285868023284,
"grad_norm": 17.12226104736328,
"learning_rate": 2.9828598347586606e-05,
"loss": 2.5913,
"step": 14200
},
{
"epoch": 1.399011886709387,
"grad_norm": 19.741445541381836,
"learning_rate": 2.9647412668502684e-05,
"loss": 2.5617,
"step": 14300
},
{
"epoch": 1.4087951866164456,
"grad_norm": 17.605525970458984,
"learning_rate": 2.946622698941876e-05,
"loss": 2.6077,
"step": 14400
},
{
"epoch": 1.4185784865235043,
"grad_norm": 17.433218002319336,
"learning_rate": 2.928504131033483e-05,
"loss": 2.5713,
"step": 14500
},
{
"epoch": 1.4185784865235043,
"eval_runtime": 181.9305,
"eval_samples_per_second": 112.362,
"eval_steps_per_second": 14.049,
"step": 14500
},
{
"epoch": 1.428361786430563,
"grad_norm": 15.442538261413574,
"learning_rate": 2.910385563125091e-05,
"loss": 2.6499,
"step": 14600
},
{
"epoch": 1.4381450863376217,
"grad_norm": 15.078730583190918,
"learning_rate": 2.892266995216698e-05,
"loss": 2.6517,
"step": 14700
},
{
"epoch": 1.4479283862446803,
"grad_norm": 23.07891273498535,
"learning_rate": 2.874148427308306e-05,
"loss": 2.594,
"step": 14800
},
{
"epoch": 1.457711686151739,
"grad_norm": 16.707923889160156,
"learning_rate": 2.856029859399913e-05,
"loss": 2.6613,
"step": 14900
},
{
"epoch": 1.4674949860587976,
"grad_norm": 16.731164932250977,
"learning_rate": 2.8379112914915208e-05,
"loss": 2.5927,
"step": 15000
},
{
"epoch": 1.4674949860587976,
"eval_runtime": 181.9649,
"eval_samples_per_second": 112.34,
"eval_steps_per_second": 14.047,
"step": 15000
},
{
"epoch": 1.4772782859658564,
"grad_norm": 16.020864486694336,
"learning_rate": 2.819792723583128e-05,
"loss": 2.6464,
"step": 15100
},
{
"epoch": 1.4870615858729148,
"grad_norm": 16.674760818481445,
"learning_rate": 2.8016741556747354e-05,
"loss": 2.5853,
"step": 15200
},
{
"epoch": 1.4968448857799737,
"grad_norm": 16.890748977661133,
"learning_rate": 2.7835555877663432e-05,
"loss": 2.5748,
"step": 15300
},
{
"epoch": 1.5066281856870323,
"grad_norm": 20.217845916748047,
"learning_rate": 2.7654370198579504e-05,
"loss": 2.6204,
"step": 15400
},
{
"epoch": 1.516411485594091,
"grad_norm": 20.459087371826172,
"learning_rate": 2.7473184519495582e-05,
"loss": 2.6103,
"step": 15500
},
{
"epoch": 1.516411485594091,
"eval_runtime": 181.9454,
"eval_samples_per_second": 112.352,
"eval_steps_per_second": 14.048,
"step": 15500
},
{
"epoch": 1.5261947855011495,
"grad_norm": 18.207612991333008,
"learning_rate": 2.7291998840411654e-05,
"loss": 2.5786,
"step": 15600
},
{
"epoch": 1.5359780854082081,
"grad_norm": 18.084758758544922,
"learning_rate": 2.7110813161327732e-05,
"loss": 2.6535,
"step": 15700
},
{
"epoch": 1.545761385315267,
"grad_norm": 15.03881549835205,
"learning_rate": 2.6929627482243803e-05,
"loss": 2.6061,
"step": 15800
},
{
"epoch": 1.5555446852223254,
"grad_norm": 16.99995231628418,
"learning_rate": 2.6748441803159878e-05,
"loss": 2.6151,
"step": 15900
},
{
"epoch": 1.5653279851293842,
"grad_norm": 15.581089973449707,
"learning_rate": 2.6567256124075956e-05,
"loss": 2.6163,
"step": 16000
},
{
"epoch": 1.5653279851293842,
"eval_runtime": 181.8152,
"eval_samples_per_second": 112.433,
"eval_steps_per_second": 14.058,
"step": 16000
},
{
"epoch": 1.5751112850364428,
"grad_norm": 21.4382266998291,
"learning_rate": 2.6386070444992028e-05,
"loss": 2.5975,
"step": 16100
},
{
"epoch": 1.5848945849435014,
"grad_norm": 15.874536514282227,
"learning_rate": 2.6204884765908106e-05,
"loss": 2.5851,
"step": 16200
},
{
"epoch": 1.59467788485056,
"grad_norm": 17.902137756347656,
"learning_rate": 2.6023699086824177e-05,
"loss": 2.6027,
"step": 16300
},
{
"epoch": 1.6044611847576187,
"grad_norm": 17.04872703552246,
"learning_rate": 2.5842513407740255e-05,
"loss": 2.5854,
"step": 16400
},
{
"epoch": 1.6142444846646775,
"grad_norm": 15.406013488769531,
"learning_rate": 2.5661327728656327e-05,
"loss": 2.5158,
"step": 16500
},
{
"epoch": 1.6142444846646775,
"eval_runtime": 181.8647,
"eval_samples_per_second": 112.402,
"eval_steps_per_second": 14.054,
"step": 16500
},
{
"epoch": 1.624027784571736,
"grad_norm": 19.62627601623535,
"learning_rate": 2.5480142049572402e-05,
"loss": 2.5378,
"step": 16600
},
{
"epoch": 1.6338110844787948,
"grad_norm": 17.825178146362305,
"learning_rate": 2.529895637048848e-05,
"loss": 2.6162,
"step": 16700
},
{
"epoch": 1.6435943843858534,
"grad_norm": 15.442023277282715,
"learning_rate": 2.511777069140455e-05,
"loss": 2.5802,
"step": 16800
},
{
"epoch": 1.653377684292912,
"grad_norm": 18.695241928100586,
"learning_rate": 2.4936585012320626e-05,
"loss": 2.585,
"step": 16900
},
{
"epoch": 1.6631609841999706,
"grad_norm": 18.992969512939453,
"learning_rate": 2.4755399333236704e-05,
"loss": 2.5448,
"step": 17000
},
{
"epoch": 1.6631609841999706,
"eval_runtime": 181.91,
"eval_samples_per_second": 112.374,
"eval_steps_per_second": 14.051,
"step": 17000
},
{
"epoch": 1.6729442841070292,
"grad_norm": 19.065349578857422,
"learning_rate": 2.457421365415278e-05,
"loss": 2.6565,
"step": 17100
},
{
"epoch": 1.682727584014088,
"grad_norm": 20.110734939575195,
"learning_rate": 2.439302797506885e-05,
"loss": 2.5519,
"step": 17200
},
{
"epoch": 1.6925108839211465,
"grad_norm": 15.886931419372559,
"learning_rate": 2.4211842295984925e-05,
"loss": 2.5589,
"step": 17300
},
{
"epoch": 1.7022941838282053,
"grad_norm": 19.213207244873047,
"learning_rate": 2.4030656616901e-05,
"loss": 2.5714,
"step": 17400
},
{
"epoch": 1.712077483735264,
"grad_norm": 17.117481231689453,
"learning_rate": 2.3849470937817075e-05,
"loss": 2.6682,
"step": 17500
},
{
"epoch": 1.712077483735264,
"eval_runtime": 181.766,
"eval_samples_per_second": 112.463,
"eval_steps_per_second": 14.062,
"step": 17500
},
{
"epoch": 1.7218607836423225,
"grad_norm": 17.19162940979004,
"learning_rate": 2.366828525873315e-05,
"loss": 2.5591,
"step": 17600
},
{
"epoch": 1.7316440835493812,
"grad_norm": 15.454411506652832,
"learning_rate": 2.3487099579649225e-05,
"loss": 2.469,
"step": 17700
},
{
"epoch": 1.7414273834564398,
"grad_norm": 15.227791786193848,
"learning_rate": 2.3305913900565303e-05,
"loss": 2.664,
"step": 17800
},
{
"epoch": 1.7512106833634986,
"grad_norm": 18.5739688873291,
"learning_rate": 2.3124728221481374e-05,
"loss": 2.5991,
"step": 17900
},
{
"epoch": 1.760993983270557,
"grad_norm": 12.589066505432129,
"learning_rate": 2.294354254239745e-05,
"loss": 2.6593,
"step": 18000
},
{
"epoch": 1.760993983270557,
"eval_runtime": 181.9699,
"eval_samples_per_second": 112.337,
"eval_steps_per_second": 14.046,
"step": 18000
},
{
"epoch": 1.7707772831776158,
"grad_norm": 20.695772171020508,
"learning_rate": 2.2762356863313524e-05,
"loss": 2.5555,
"step": 18100
},
{
"epoch": 1.7805605830846745,
"grad_norm": 12.731703758239746,
"learning_rate": 2.25811711842296e-05,
"loss": 2.4617,
"step": 18200
},
{
"epoch": 1.790343882991733,
"grad_norm": 18.506074905395508,
"learning_rate": 2.2399985505145674e-05,
"loss": 2.6061,
"step": 18300
},
{
"epoch": 1.800127182898792,
"grad_norm": 14.8694486618042,
"learning_rate": 2.221879982606175e-05,
"loss": 2.5779,
"step": 18400
},
{
"epoch": 1.8099104828058503,
"grad_norm": 22.47985076904297,
"learning_rate": 2.2037614146977827e-05,
"loss": 2.5012,
"step": 18500
},
{
"epoch": 1.8099104828058503,
"eval_runtime": 182.3919,
"eval_samples_per_second": 112.077,
"eval_steps_per_second": 14.014,
"step": 18500
},
{
"epoch": 1.8196937827129092,
"grad_norm": 25.74334144592285,
"learning_rate": 2.1856428467893898e-05,
"loss": 2.5265,
"step": 18600
},
{
"epoch": 1.8294770826199676,
"grad_norm": 18.477630615234375,
"learning_rate": 2.1675242788809973e-05,
"loss": 2.5555,
"step": 18700
},
{
"epoch": 1.8392603825270264,
"grad_norm": 14.832316398620605,
"learning_rate": 2.1494057109726048e-05,
"loss": 2.4609,
"step": 18800
},
{
"epoch": 1.849043682434085,
"grad_norm": 17.025096893310547,
"learning_rate": 2.1312871430642123e-05,
"loss": 2.5119,
"step": 18900
},
{
"epoch": 1.8588269823411436,
"grad_norm": 16.852436065673828,
"learning_rate": 2.1131685751558197e-05,
"loss": 2.5369,
"step": 19000
},
{
"epoch": 1.8588269823411436,
"eval_runtime": 181.7443,
"eval_samples_per_second": 112.477,
"eval_steps_per_second": 14.064,
"step": 19000
},
{
"epoch": 1.8686102822482025,
"grad_norm": 15.160259246826172,
"learning_rate": 2.0950500072474272e-05,
"loss": 2.6297,
"step": 19100
},
{
"epoch": 1.8783935821552609,
"grad_norm": 15.909671783447266,
"learning_rate": 2.0769314393390347e-05,
"loss": 2.4696,
"step": 19200
},
{
"epoch": 1.8881768820623197,
"grad_norm": 14.201844215393066,
"learning_rate": 2.0588128714306422e-05,
"loss": 2.5653,
"step": 19300
},
{
"epoch": 1.8979601819693783,
"grad_norm": 16.351415634155273,
"learning_rate": 2.0406943035222497e-05,
"loss": 2.4962,
"step": 19400
},
{
"epoch": 1.907743481876437,
"grad_norm": 16.943771362304688,
"learning_rate": 2.022575735613857e-05,
"loss": 2.5091,
"step": 19500
},
{
"epoch": 1.907743481876437,
"eval_runtime": 181.6486,
"eval_samples_per_second": 112.536,
"eval_steps_per_second": 14.071,
"step": 19500
},
{
"epoch": 1.9175267817834956,
"grad_norm": 15.006349563598633,
"learning_rate": 2.0044571677054646e-05,
"loss": 2.5214,
"step": 19600
},
{
"epoch": 1.9273100816905542,
"grad_norm": 17.305580139160156,
"learning_rate": 1.986338599797072e-05,
"loss": 2.4989,
"step": 19700
},
{
"epoch": 1.937093381597613,
"grad_norm": 17.28044891357422,
"learning_rate": 1.9682200318886796e-05,
"loss": 2.4008,
"step": 19800
},
{
"epoch": 1.9468766815046714,
"grad_norm": 18.25079917907715,
"learning_rate": 1.950101463980287e-05,
"loss": 2.6015,
"step": 19900
},
{
"epoch": 1.9566599814117303,
"grad_norm": 20.741668701171875,
"learning_rate": 1.9319828960718946e-05,
"loss": 2.4081,
"step": 20000
},
{
"epoch": 1.9566599814117303,
"eval_runtime": 181.7745,
"eval_samples_per_second": 112.458,
"eval_steps_per_second": 14.061,
"step": 20000
},
{
"epoch": 1.9664432813187889,
"grad_norm": 16.1226863861084,
"learning_rate": 1.913864328163502e-05,
"loss": 2.5418,
"step": 20100
},
{
"epoch": 1.9762265812258475,
"grad_norm": 13.914982795715332,
"learning_rate": 1.8957457602551095e-05,
"loss": 2.5248,
"step": 20200
},
{
"epoch": 1.986009881132906,
"grad_norm": 15.072690963745117,
"learning_rate": 1.877627192346717e-05,
"loss": 2.5488,
"step": 20300
},
{
"epoch": 1.9957931810399647,
"grad_norm": 15.510763168334961,
"learning_rate": 1.8595086244383245e-05,
"loss": 2.4605,
"step": 20400
},
{
"epoch": 2.0055764809470236,
"grad_norm": 18.463842391967773,
"learning_rate": 1.841390056529932e-05,
"loss": 2.522,
"step": 20500
},
{
"epoch": 2.0055764809470236,
"eval_runtime": 182.07,
"eval_samples_per_second": 112.276,
"eval_steps_per_second": 14.039,
"step": 20500
},
{
"epoch": 2.015359780854082,
"grad_norm": 16.670269012451172,
"learning_rate": 1.8232714886215394e-05,
"loss": 2.5585,
"step": 20600
},
{
"epoch": 2.025143080761141,
"grad_norm": 20.60368537902832,
"learning_rate": 1.805152920713147e-05,
"loss": 2.5381,
"step": 20700
},
{
"epoch": 2.034926380668199,
"grad_norm": 15.686981201171875,
"learning_rate": 1.7870343528047544e-05,
"loss": 2.5721,
"step": 20800
},
{
"epoch": 2.044709680575258,
"grad_norm": 14.691718101501465,
"learning_rate": 1.768915784896362e-05,
"loss": 2.5187,
"step": 20900
},
{
"epoch": 2.054492980482317,
"grad_norm": 16.31734848022461,
"learning_rate": 1.7507972169879694e-05,
"loss": 2.5202,
"step": 21000
},
{
"epoch": 2.054492980482317,
"eval_runtime": 181.9896,
"eval_samples_per_second": 112.325,
"eval_steps_per_second": 14.045,
"step": 21000
},
{
"epoch": 2.0642762803893753,
"grad_norm": 12.698554992675781,
"learning_rate": 1.732678649079577e-05,
"loss": 2.4228,
"step": 21100
},
{
"epoch": 2.074059580296434,
"grad_norm": 16.34201431274414,
"learning_rate": 1.7145600811711843e-05,
"loss": 2.3963,
"step": 21200
},
{
"epoch": 2.0838428802034925,
"grad_norm": 16.52840232849121,
"learning_rate": 1.6964415132627918e-05,
"loss": 2.4759,
"step": 21300
},
{
"epoch": 2.0936261801105513,
"grad_norm": 14.856452941894531,
"learning_rate": 1.6783229453543993e-05,
"loss": 2.4675,
"step": 21400
},
{
"epoch": 2.1034094800176097,
"grad_norm": 19.68895721435547,
"learning_rate": 1.6602043774460068e-05,
"loss": 2.5324,
"step": 21500
},
{
"epoch": 2.1034094800176097,
"eval_runtime": 182.1877,
"eval_samples_per_second": 112.203,
"eval_steps_per_second": 14.029,
"step": 21500
},
{
"epoch": 2.1131927799246686,
"grad_norm": 23.248056411743164,
"learning_rate": 1.6420858095376143e-05,
"loss": 2.5231,
"step": 21600
},
{
"epoch": 2.1229760798317274,
"grad_norm": 25.471004486083984,
"learning_rate": 1.6239672416292217e-05,
"loss": 2.5871,
"step": 21700
},
{
"epoch": 2.132759379738786,
"grad_norm": 17.794851303100586,
"learning_rate": 1.6058486737208292e-05,
"loss": 2.5008,
"step": 21800
},
{
"epoch": 2.1425426796458447,
"grad_norm": 15.450346946716309,
"learning_rate": 1.5877301058124367e-05,
"loss": 2.4194,
"step": 21900
},
{
"epoch": 2.152325979552903,
"grad_norm": 13.243645668029785,
"learning_rate": 1.5696115379040442e-05,
"loss": 2.5018,
"step": 22000
},
{
"epoch": 2.152325979552903,
"eval_runtime": 181.9841,
"eval_samples_per_second": 112.328,
"eval_steps_per_second": 14.045,
"step": 22000
},
{
"epoch": 2.162109279459962,
"grad_norm": 16.996198654174805,
"learning_rate": 1.5514929699956517e-05,
"loss": 2.4492,
"step": 22100
},
{
"epoch": 2.1718925793670203,
"grad_norm": 20.05558967590332,
"learning_rate": 1.5333744020872588e-05,
"loss": 2.489,
"step": 22200
},
{
"epoch": 2.181675879274079,
"grad_norm": 15.66326904296875,
"learning_rate": 1.5152558341788666e-05,
"loss": 2.5089,
"step": 22300
},
{
"epoch": 2.191459179181138,
"grad_norm": 17.83564567565918,
"learning_rate": 1.4971372662704741e-05,
"loss": 2.4945,
"step": 22400
},
{
"epoch": 2.2012424790881964,
"grad_norm": 21.466899871826172,
"learning_rate": 1.4790186983620816e-05,
"loss": 2.5467,
"step": 22500
},
{
"epoch": 2.2012424790881964,
"eval_runtime": 182.8328,
"eval_samples_per_second": 111.807,
"eval_steps_per_second": 13.98,
"step": 22500
},
{
"epoch": 2.211025778995255,
"grad_norm": 17.91064453125,
"learning_rate": 1.4609001304536891e-05,
"loss": 2.5144,
"step": 22600
},
{
"epoch": 2.2208090789023136,
"grad_norm": 17.678396224975586,
"learning_rate": 1.4427815625452964e-05,
"loss": 2.5018,
"step": 22700
},
{
"epoch": 2.2305923788093724,
"grad_norm": 17.510461807250977,
"learning_rate": 1.4246629946369039e-05,
"loss": 2.4228,
"step": 22800
},
{
"epoch": 2.240375678716431,
"grad_norm": 24.923967361450195,
"learning_rate": 1.4065444267285114e-05,
"loss": 2.5249,
"step": 22900
},
{
"epoch": 2.2501589786234897,
"grad_norm": 17.82384490966797,
"learning_rate": 1.388425858820119e-05,
"loss": 2.4282,
"step": 23000
},
{
"epoch": 2.2501589786234897,
"eval_runtime": 182.0459,
"eval_samples_per_second": 112.29,
"eval_steps_per_second": 14.04,
"step": 23000
},
{
"epoch": 2.2599422785305485,
"grad_norm": 16.13028335571289,
"learning_rate": 1.3703072909117265e-05,
"loss": 2.4472,
"step": 23100
},
{
"epoch": 2.269725578437607,
"grad_norm": 15.137242317199707,
"learning_rate": 1.352188723003334e-05,
"loss": 2.5985,
"step": 23200
},
{
"epoch": 2.2795088783446658,
"grad_norm": 16.187530517578125,
"learning_rate": 1.3340701550949415e-05,
"loss": 2.4862,
"step": 23300
},
{
"epoch": 2.289292178251724,
"grad_norm": 18.84433937072754,
"learning_rate": 1.3159515871865488e-05,
"loss": 2.516,
"step": 23400
},
{
"epoch": 2.299075478158783,
"grad_norm": 20.209121704101562,
"learning_rate": 1.2978330192781563e-05,
"loss": 2.5031,
"step": 23500
},
{
"epoch": 2.299075478158783,
"eval_runtime": 181.9806,
"eval_samples_per_second": 112.331,
"eval_steps_per_second": 14.045,
"step": 23500
},
{
"epoch": 2.308858778065842,
"grad_norm": 67.4502182006836,
"learning_rate": 1.2797144513697637e-05,
"loss": 2.4491,
"step": 23600
},
{
"epoch": 2.3186420779729002,
"grad_norm": 14.940401077270508,
"learning_rate": 1.2615958834613712e-05,
"loss": 2.5669,
"step": 23700
},
{
"epoch": 2.328425377879959,
"grad_norm": 16.591793060302734,
"learning_rate": 1.2434773155529787e-05,
"loss": 2.4565,
"step": 23800
},
{
"epoch": 2.3382086777870175,
"grad_norm": 16.798791885375977,
"learning_rate": 1.2253587476445862e-05,
"loss": 2.4046,
"step": 23900
},
{
"epoch": 2.3479919776940763,
"grad_norm": 17.712255477905273,
"learning_rate": 1.2072401797361937e-05,
"loss": 2.4453,
"step": 24000
},
{
"epoch": 2.3479919776940763,
"eval_runtime": 182.0401,
"eval_samples_per_second": 112.294,
"eval_steps_per_second": 14.041,
"step": 24000
},
{
"epoch": 2.3577752776011347,
"grad_norm": 18.64284324645996,
"learning_rate": 1.1891216118278011e-05,
"loss": 2.3973,
"step": 24100
},
{
"epoch": 2.3675585775081935,
"grad_norm": 18.185895919799805,
"learning_rate": 1.1710030439194086e-05,
"loss": 2.5045,
"step": 24200
},
{
"epoch": 2.377341877415252,
"grad_norm": 23.201522827148438,
"learning_rate": 1.1528844760110163e-05,
"loss": 2.5402,
"step": 24300
},
{
"epoch": 2.3871251773223108,
"grad_norm": 21.606412887573242,
"learning_rate": 1.1347659081026236e-05,
"loss": 2.4285,
"step": 24400
},
{
"epoch": 2.3969084772293696,
"grad_norm": 16.318761825561523,
"learning_rate": 1.116647340194231e-05,
"loss": 2.5509,
"step": 24500
},
{
"epoch": 2.3969084772293696,
"eval_runtime": 182.0431,
"eval_samples_per_second": 112.292,
"eval_steps_per_second": 14.041,
"step": 24500
},
{
"epoch": 2.406691777136428,
"grad_norm": 17.779014587402344,
"learning_rate": 1.0985287722858386e-05,
"loss": 2.4245,
"step": 24600
},
{
"epoch": 2.416475077043487,
"grad_norm": 18.44321060180664,
"learning_rate": 1.080410204377446e-05,
"loss": 2.5223,
"step": 24700
},
{
"epoch": 2.4262583769505452,
"grad_norm": 24.017047882080078,
"learning_rate": 1.0622916364690535e-05,
"loss": 2.4846,
"step": 24800
},
{
"epoch": 2.436041676857604,
"grad_norm": 14.89560604095459,
"learning_rate": 1.044173068560661e-05,
"loss": 2.5922,
"step": 24900
},
{
"epoch": 2.445824976764663,
"grad_norm": 15.532561302185059,
"learning_rate": 1.0260545006522685e-05,
"loss": 2.3976,
"step": 25000
},
{
"epoch": 2.445824976764663,
"eval_runtime": 182.1033,
"eval_samples_per_second": 112.255,
"eval_steps_per_second": 14.036,
"step": 25000
},
{
"epoch": 2.4556082766717213,
"grad_norm": 18.041282653808594,
"learning_rate": 1.007935932743876e-05,
"loss": 2.4731,
"step": 25100
},
{
"epoch": 2.46539157657878,
"grad_norm": 13.40858268737793,
"learning_rate": 9.898173648354834e-06,
"loss": 2.4838,
"step": 25200
},
{
"epoch": 2.4751748764858386,
"grad_norm": 17.450841903686523,
"learning_rate": 9.71698796927091e-06,
"loss": 2.3999,
"step": 25300
},
{
"epoch": 2.4849581763928974,
"grad_norm": 17.556467056274414,
"learning_rate": 9.535802290186984e-06,
"loss": 2.3867,
"step": 25400
},
{
"epoch": 2.494741476299956,
"grad_norm": 18.578310012817383,
"learning_rate": 9.354616611103059e-06,
"loss": 2.4546,
"step": 25500
},
{
"epoch": 2.494741476299956,
"eval_runtime": 182.0338,
"eval_samples_per_second": 112.298,
"eval_steps_per_second": 14.041,
"step": 25500
},
{
"epoch": 2.5045247762070146,
"grad_norm": 14.936469078063965,
"learning_rate": 9.173430932019134e-06,
"loss": 2.5562,
"step": 25600
},
{
"epoch": 2.514308076114073,
"grad_norm": 17.527040481567383,
"learning_rate": 8.992245252935209e-06,
"loss": 2.4008,
"step": 25700
},
{
"epoch": 2.524091376021132,
"grad_norm": 12.91336727142334,
"learning_rate": 8.811059573851283e-06,
"loss": 2.4655,
"step": 25800
},
{
"epoch": 2.5338746759281907,
"grad_norm": 15.168461799621582,
"learning_rate": 8.629873894767358e-06,
"loss": 2.4468,
"step": 25900
},
{
"epoch": 2.543657975835249,
"grad_norm": 17.5390682220459,
"learning_rate": 8.448688215683433e-06,
"loss": 2.4836,
"step": 26000
},
{
"epoch": 2.543657975835249,
"eval_runtime": 182.1148,
"eval_samples_per_second": 112.248,
"eval_steps_per_second": 14.035,
"step": 26000
},
{
"epoch": 2.553441275742308,
"grad_norm": 15.126510620117188,
"learning_rate": 8.267502536599508e-06,
"loss": 2.387,
"step": 26100
},
{
"epoch": 2.5632245756493663,
"grad_norm": 15.374293327331543,
"learning_rate": 8.086316857515583e-06,
"loss": 2.3652,
"step": 26200
},
{
"epoch": 2.573007875556425,
"grad_norm": 15.498108863830566,
"learning_rate": 7.905131178431657e-06,
"loss": 2.4749,
"step": 26300
},
{
"epoch": 2.582791175463484,
"grad_norm": 16.221315383911133,
"learning_rate": 7.723945499347732e-06,
"loss": 2.4567,
"step": 26400
},
{
"epoch": 2.5925744753705424,
"grad_norm": 18.839122772216797,
"learning_rate": 7.542759820263806e-06,
"loss": 2.3554,
"step": 26500
},
{
"epoch": 2.5925744753705424,
"eval_runtime": 181.9597,
"eval_samples_per_second": 112.344,
"eval_steps_per_second": 14.047,
"step": 26500
},
{
"epoch": 2.6023577752776013,
"grad_norm": 22.626708984375,
"learning_rate": 7.361574141179882e-06,
"loss": 2.502,
"step": 26600
},
{
"epoch": 2.6121410751846597,
"grad_norm": 16.519880294799805,
"learning_rate": 7.180388462095957e-06,
"loss": 2.5034,
"step": 26700
},
{
"epoch": 2.6219243750917185,
"grad_norm": 27.421489715576172,
"learning_rate": 6.999202783012031e-06,
"loss": 2.5276,
"step": 26800
},
{
"epoch": 2.6317076749987773,
"grad_norm": 15.274630546569824,
"learning_rate": 6.8180171039281055e-06,
"loss": 2.4121,
"step": 26900
},
{
"epoch": 2.6414909749058357,
"grad_norm": 15.751582145690918,
"learning_rate": 6.636831424844181e-06,
"loss": 2.5799,
"step": 27000
},
{
"epoch": 2.6414909749058357,
"eval_runtime": 182.0873,
"eval_samples_per_second": 112.265,
"eval_steps_per_second": 14.037,
"step": 27000
},
{
"epoch": 2.651274274812894,
"grad_norm": 16.674850463867188,
"learning_rate": 6.455645745760255e-06,
"loss": 2.3872,
"step": 27100
},
{
"epoch": 2.661057574719953,
"grad_norm": 12.62803840637207,
"learning_rate": 6.27446006667633e-06,
"loss": 2.4,
"step": 27200
},
{
"epoch": 2.670840874627012,
"grad_norm": 18.055158615112305,
"learning_rate": 6.093274387592405e-06,
"loss": 2.4681,
"step": 27300
},
{
"epoch": 2.68062417453407,
"grad_norm": 17.21278190612793,
"learning_rate": 5.91208870850848e-06,
"loss": 2.5441,
"step": 27400
},
{
"epoch": 2.690407474441129,
"grad_norm": 20.945236206054688,
"learning_rate": 5.7309030294245544e-06,
"loss": 2.4388,
"step": 27500
},
{
"epoch": 2.690407474441129,
"eval_runtime": 182.1279,
"eval_samples_per_second": 112.24,
"eval_steps_per_second": 14.034,
"step": 27500
},
{
"epoch": 2.7001907743481874,
"grad_norm": 23.483661651611328,
"learning_rate": 5.549717350340629e-06,
"loss": 2.4589,
"step": 27600
},
{
"epoch": 2.7099740742552463,
"grad_norm": 17.954036712646484,
"learning_rate": 5.368531671256704e-06,
"loss": 2.4477,
"step": 27700
},
{
"epoch": 2.719757374162305,
"grad_norm": 16.187314987182617,
"learning_rate": 5.187345992172779e-06,
"loss": 2.4967,
"step": 27800
},
{
"epoch": 2.7295406740693635,
"grad_norm": 14.324910163879395,
"learning_rate": 5.006160313088854e-06,
"loss": 2.3921,
"step": 27900
},
{
"epoch": 2.7393239739764224,
"grad_norm": 20.81557846069336,
"learning_rate": 4.8249746340049285e-06,
"loss": 2.5201,
"step": 28000
},
{
"epoch": 2.7393239739764224,
"eval_runtime": 182.146,
"eval_samples_per_second": 112.229,
"eval_steps_per_second": 14.033,
"step": 28000
},
{
"epoch": 2.7491072738834808,
"grad_norm": 18.682844161987305,
"learning_rate": 4.643788954921003e-06,
"loss": 2.4325,
"step": 28100
},
{
"epoch": 2.7588905737905396,
"grad_norm": 16.227272033691406,
"learning_rate": 4.462603275837078e-06,
"loss": 2.3864,
"step": 28200
},
{
"epoch": 2.7686738736975984,
"grad_norm": 16.20302963256836,
"learning_rate": 4.281417596753152e-06,
"loss": 2.5296,
"step": 28300
},
{
"epoch": 2.778457173604657,
"grad_norm": 18.634096145629883,
"learning_rate": 4.100231917669228e-06,
"loss": 2.4514,
"step": 28400
},
{
"epoch": 2.7882404735117157,
"grad_norm": 13.040008544921875,
"learning_rate": 3.919046238585303e-06,
"loss": 2.3661,
"step": 28500
},
{
"epoch": 2.7882404735117157,
"eval_runtime": 181.9164,
"eval_samples_per_second": 112.37,
"eval_steps_per_second": 14.05,
"step": 28500
},
{
"epoch": 2.798023773418774,
"grad_norm": 14.142943382263184,
"learning_rate": 3.737860559501377e-06,
"loss": 2.5074,
"step": 28600
},
{
"epoch": 2.807807073325833,
"grad_norm": 17.934324264526367,
"learning_rate": 3.5566748804174523e-06,
"loss": 2.4224,
"step": 28700
},
{
"epoch": 2.8175903732328913,
"grad_norm": 14.450194358825684,
"learning_rate": 3.3754892013335267e-06,
"loss": 2.4949,
"step": 28800
},
{
"epoch": 2.82737367313995,
"grad_norm": 17.746837615966797,
"learning_rate": 3.194303522249602e-06,
"loss": 2.4153,
"step": 28900
},
{
"epoch": 2.8371569730470085,
"grad_norm": 13.962541580200195,
"learning_rate": 3.0131178431656763e-06,
"loss": 2.4804,
"step": 29000
},
{
"epoch": 2.8371569730470085,
"eval_runtime": 182.0262,
"eval_samples_per_second": 112.303,
"eval_steps_per_second": 14.042,
"step": 29000
},
{
"epoch": 2.8469402729540674,
"grad_norm": 16.669286727905273,
"learning_rate": 2.831932164081751e-06,
"loss": 2.5397,
"step": 29100
},
{
"epoch": 2.856723572861126,
"grad_norm": 15.421733856201172,
"learning_rate": 2.650746484997826e-06,
"loss": 2.4175,
"step": 29200
},
{
"epoch": 2.8665068727681846,
"grad_norm": 14.135702133178711,
"learning_rate": 2.4695608059139007e-06,
"loss": 2.5069,
"step": 29300
},
{
"epoch": 2.8762901726752435,
"grad_norm": 17.41412925720215,
"learning_rate": 2.2883751268299756e-06,
"loss": 2.3997,
"step": 29400
},
{
"epoch": 2.886073472582302,
"grad_norm": 14.824533462524414,
"learning_rate": 2.1071894477460504e-06,
"loss": 2.3945,
"step": 29500
},
{
"epoch": 2.886073472582302,
"eval_runtime": 181.9299,
"eval_samples_per_second": 112.362,
"eval_steps_per_second": 14.049,
"step": 29500
},
{
"epoch": 2.8958567724893607,
"grad_norm": 27.31865119934082,
"learning_rate": 1.926003768662125e-06,
"loss": 2.45,
"step": 29600
},
{
"epoch": 2.9056400723964195,
"grad_norm": 18.966655731201172,
"learning_rate": 1.7448180895781998e-06,
"loss": 2.3916,
"step": 29700
},
{
"epoch": 2.915423372303478,
"grad_norm": 18.538440704345703,
"learning_rate": 1.5636324104942746e-06,
"loss": 2.4625,
"step": 29800
},
{
"epoch": 2.9252066722105368,
"grad_norm": 21.757272720336914,
"learning_rate": 1.3824467314103494e-06,
"loss": 2.3722,
"step": 29900
},
{
"epoch": 2.934989972117595,
"grad_norm": 16.907358169555664,
"learning_rate": 1.201261052326424e-06,
"loss": 2.464,
"step": 30000
},
{
"epoch": 2.934989972117595,
"eval_runtime": 181.9148,
"eval_samples_per_second": 112.371,
"eval_steps_per_second": 14.051,
"step": 30000
},
{
"epoch": 2.944773272024654,
"grad_norm": 13.88399600982666,
"learning_rate": 1.0200753732424989e-06,
"loss": 2.5005,
"step": 30100
},
{
"epoch": 2.954556571931713,
"grad_norm": 19.77507781982422,
"learning_rate": 8.388896941585737e-07,
"loss": 2.3829,
"step": 30200
},
{
"epoch": 2.9643398718387712,
"grad_norm": 16.535932540893555,
"learning_rate": 6.577040150746485e-07,
"loss": 2.4788,
"step": 30300
},
{
"epoch": 2.9741231717458296,
"grad_norm": 15.027000427246094,
"learning_rate": 4.765183359907233e-07,
"loss": 2.5007,
"step": 30400
},
{
"epoch": 2.9839064716528885,
"grad_norm": 14.9392671585083,
"learning_rate": 2.953326569067981e-07,
"loss": 2.4847,
"step": 30500
},
{
"epoch": 2.9839064716528885,
"eval_runtime": 181.9853,
"eval_samples_per_second": 112.328,
"eval_steps_per_second": 14.045,
"step": 30500
}
],
"logging_steps": 100,
"max_steps": 30663,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0644527086729788e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}