{
"best_global_step": 5100,
"best_metric": 1.591291904449463,
"best_model_checkpoint": "./checkpoints/gpt2_test_chunks30/checkpoint-5100",
"epoch": 0.51,
"eval_steps": 50,
"global_step": 5100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001,
"grad_norm": 4.633359432220459,
"learning_rate": 1.8e-06,
"loss": 7.7108,
"step": 10
},
{
"epoch": 0.002,
"grad_norm": 2.3646275997161865,
"learning_rate": 3.8e-06,
"loss": 7.4581,
"step": 20
},
{
"epoch": 0.003,
"grad_norm": 2.262131929397583,
"learning_rate": 5.8e-06,
"loss": 7.2566,
"step": 30
},
{
"epoch": 0.004,
"grad_norm": 2.6301262378692627,
"learning_rate": 7.8e-06,
"loss": 7.0053,
"step": 40
},
{
"epoch": 0.005,
"grad_norm": 2.221440315246582,
"learning_rate": 9.800000000000001e-06,
"loss": 6.6106,
"step": 50
},
{
"epoch": 0.005,
"eval_loss": 6.111213684082031,
"eval_runtime": 4.5453,
"eval_samples_per_second": 440.015,
"eval_steps_per_second": 27.501,
"step": 50
},
{
"epoch": 0.006,
"grad_norm": 1.959439992904663,
"learning_rate": 1.18e-05,
"loss": 6.1216,
"step": 60
},
{
"epoch": 0.007,
"grad_norm": 1.562995195388794,
"learning_rate": 1.3800000000000002e-05,
"loss": 5.6405,
"step": 70
},
{
"epoch": 0.008,
"grad_norm": 1.4331400394439697,
"learning_rate": 1.58e-05,
"loss": 5.1521,
"step": 80
},
{
"epoch": 0.009,
"grad_norm": 1.6445435285568237,
"learning_rate": 1.78e-05,
"loss": 4.7211,
"step": 90
},
{
"epoch": 0.01,
"grad_norm": 1.3628312349319458,
"learning_rate": 1.9800000000000004e-05,
"loss": 4.4151,
"step": 100
},
{
"epoch": 0.01,
"eval_loss": 4.110222816467285,
"eval_runtime": 4.5472,
"eval_samples_per_second": 439.827,
"eval_steps_per_second": 27.489,
"step": 100
},
{
"epoch": 0.011,
"grad_norm": 1.137888789176941,
"learning_rate": 2.18e-05,
"loss": 4.1114,
"step": 110
},
{
"epoch": 0.012,
"grad_norm": 1.4980727434158325,
"learning_rate": 2.38e-05,
"loss": 3.8488,
"step": 120
},
{
"epoch": 0.013,
"grad_norm": 1.1802358627319336,
"learning_rate": 2.58e-05,
"loss": 3.5541,
"step": 130
},
{
"epoch": 0.014,
"grad_norm": 1.0807380676269531,
"learning_rate": 2.7800000000000005e-05,
"loss": 3.4673,
"step": 140
},
{
"epoch": 0.015,
"grad_norm": 0.957696259021759,
"learning_rate": 2.98e-05,
"loss": 3.2401,
"step": 150
},
{
"epoch": 0.015,
"eval_loss": 3.2231221199035645,
"eval_runtime": 4.5997,
"eval_samples_per_second": 434.814,
"eval_steps_per_second": 27.176,
"step": 150
},
{
"epoch": 0.016,
"grad_norm": 0.8120741248130798,
"learning_rate": 3.18e-05,
"loss": 3.1228,
"step": 160
},
{
"epoch": 0.017,
"grad_norm": 0.8813096284866333,
"learning_rate": 3.38e-05,
"loss": 3.1247,
"step": 170
},
{
"epoch": 0.018,
"grad_norm": 0.7591573596000671,
"learning_rate": 3.58e-05,
"loss": 2.9657,
"step": 180
},
{
"epoch": 0.019,
"grad_norm": 0.6872971057891846,
"learning_rate": 3.7800000000000004e-05,
"loss": 2.8526,
"step": 190
},
{
"epoch": 0.02,
"grad_norm": 0.7402148246765137,
"learning_rate": 3.9800000000000005e-05,
"loss": 2.7961,
"step": 200
},
{
"epoch": 0.02,
"eval_loss": 2.8180038928985596,
"eval_runtime": 4.579,
"eval_samples_per_second": 436.777,
"eval_steps_per_second": 27.299,
"step": 200
},
{
"epoch": 0.021,
"grad_norm": 0.7957950234413147,
"learning_rate": 4.18e-05,
"loss": 2.8335,
"step": 210
},
{
"epoch": 0.022,
"grad_norm": 0.6467453837394714,
"learning_rate": 4.38e-05,
"loss": 2.7491,
"step": 220
},
{
"epoch": 0.023,
"grad_norm": 0.7767886519432068,
"learning_rate": 4.58e-05,
"loss": 2.7367,
"step": 230
},
{
"epoch": 0.024,
"grad_norm": 0.7259961366653442,
"learning_rate": 4.78e-05,
"loss": 2.6567,
"step": 240
},
{
"epoch": 0.025,
"grad_norm": 0.6372230648994446,
"learning_rate": 4.9800000000000004e-05,
"loss": 2.6064,
"step": 250
},
{
"epoch": 0.025,
"eval_loss": 2.628188133239746,
"eval_runtime": 4.6002,
"eval_samples_per_second": 434.768,
"eval_steps_per_second": 27.173,
"step": 250
},
{
"epoch": 0.026,
"grad_norm": 0.6601161956787109,
"learning_rate": 5.1800000000000005e-05,
"loss": 2.6328,
"step": 260
},
{
"epoch": 0.027,
"grad_norm": 0.5882264375686646,
"learning_rate": 5.380000000000001e-05,
"loss": 2.5728,
"step": 270
},
{
"epoch": 0.028,
"grad_norm": 0.601144552230835,
"learning_rate": 5.580000000000001e-05,
"loss": 2.5354,
"step": 280
},
{
"epoch": 0.029,
"grad_norm": 0.4997910261154175,
"learning_rate": 5.7799999999999995e-05,
"loss": 2.586,
"step": 290
},
{
"epoch": 0.03,
"grad_norm": 0.5014902949333191,
"learning_rate": 5.9800000000000003e-05,
"loss": 2.5666,
"step": 300
},
{
"epoch": 0.03,
"eval_loss": 2.5160727500915527,
"eval_runtime": 4.5957,
"eval_samples_per_second": 435.193,
"eval_steps_per_second": 27.2,
"step": 300
},
{
"epoch": 0.031,
"grad_norm": 0.5040959715843201,
"learning_rate": 6.18e-05,
"loss": 2.5207,
"step": 310
},
{
"epoch": 0.032,
"grad_norm": 0.5737811923027039,
"learning_rate": 6.38e-05,
"loss": 2.5029,
"step": 320
},
{
"epoch": 0.033,
"grad_norm": 0.5511871576309204,
"learning_rate": 6.58e-05,
"loss": 2.4785,
"step": 330
},
{
"epoch": 0.034,
"grad_norm": 0.5300912857055664,
"learning_rate": 6.780000000000001e-05,
"loss": 2.451,
"step": 340
},
{
"epoch": 0.035,
"grad_norm": 0.5354319214820862,
"learning_rate": 6.98e-05,
"loss": 2.4384,
"step": 350
},
{
"epoch": 0.035,
"eval_loss": 2.447114944458008,
"eval_runtime": 4.6248,
"eval_samples_per_second": 432.454,
"eval_steps_per_second": 27.028,
"step": 350
},
{
"epoch": 0.036,
"grad_norm": 0.5186896324157715,
"learning_rate": 7.18e-05,
"loss": 2.3922,
"step": 360
},
{
"epoch": 0.037,
"grad_norm": 0.6687894463539124,
"learning_rate": 7.38e-05,
"loss": 2.3011,
"step": 370
},
{
"epoch": 0.038,
"grad_norm": 0.6902775764465332,
"learning_rate": 7.58e-05,
"loss": 2.3917,
"step": 380
},
{
"epoch": 0.039,
"grad_norm": 0.5241763591766357,
"learning_rate": 7.780000000000001e-05,
"loss": 2.4254,
"step": 390
},
{
"epoch": 0.04,
"grad_norm": 0.5403384566307068,
"learning_rate": 7.98e-05,
"loss": 2.3213,
"step": 400
},
{
"epoch": 0.04,
"eval_loss": 2.4054787158966064,
"eval_runtime": 4.6055,
"eval_samples_per_second": 434.265,
"eval_steps_per_second": 27.142,
"step": 400
},
{
"epoch": 0.041,
"grad_norm": 0.46202412247657776,
"learning_rate": 8.18e-05,
"loss": 2.3921,
"step": 410
},
{
"epoch": 0.042,
"grad_norm": 0.5079010725021362,
"learning_rate": 8.38e-05,
"loss": 2.3346,
"step": 420
},
{
"epoch": 0.043,
"grad_norm": 0.4820123612880707,
"learning_rate": 8.58e-05,
"loss": 2.2619,
"step": 430
},
{
"epoch": 0.044,
"grad_norm": 0.7787259817123413,
"learning_rate": 8.78e-05,
"loss": 2.2583,
"step": 440
},
{
"epoch": 0.045,
"grad_norm": 0.4925244152545929,
"learning_rate": 8.98e-05,
"loss": 2.3056,
"step": 450
},
{
"epoch": 0.045,
"eval_loss": 2.36269474029541,
"eval_runtime": 4.6434,
"eval_samples_per_second": 430.723,
"eval_steps_per_second": 26.92,
"step": 450
},
{
"epoch": 0.046,
"grad_norm": 0.49801623821258545,
"learning_rate": 9.180000000000001e-05,
"loss": 2.3533,
"step": 460
},
{
"epoch": 0.047,
"grad_norm": 0.5879547595977783,
"learning_rate": 9.38e-05,
"loss": 2.3018,
"step": 470
},
{
"epoch": 0.048,
"grad_norm": 0.4898471236228943,
"learning_rate": 9.58e-05,
"loss": 2.3325,
"step": 480
},
{
"epoch": 0.049,
"grad_norm": 0.5592429041862488,
"learning_rate": 9.78e-05,
"loss": 2.2995,
"step": 490
},
{
"epoch": 0.05,
"grad_norm": 0.6377447843551636,
"learning_rate": 9.98e-05,
"loss": 2.2615,
"step": 500
},
{
"epoch": 0.05,
"eval_loss": 2.3109450340270996,
"eval_runtime": 4.6129,
"eval_samples_per_second": 433.566,
"eval_steps_per_second": 27.098,
"step": 500
},
{
"epoch": 0.051,
"grad_norm": 0.4234420955181122,
"learning_rate": 9.911197057469107e-05,
"loss": 2.2316,
"step": 510
},
{
"epoch": 0.052,
"grad_norm": 0.40216103196144104,
"learning_rate": 9.815249038111776e-05,
"loss": 2.2426,
"step": 520
},
{
"epoch": 0.053,
"grad_norm": 0.46068865060806274,
"learning_rate": 9.722034684781694e-05,
"loss": 2.2844,
"step": 530
},
{
"epoch": 0.054,
"grad_norm": 0.3902567923069,
"learning_rate": 9.631426606617744e-05,
"loss": 2.1736,
"step": 540
},
{
"epoch": 0.055,
"grad_norm": 0.4389937222003937,
"learning_rate": 9.543305571897804e-05,
"loss": 2.2481,
"step": 550
},
{
"epoch": 0.055,
"eval_loss": 2.266875982284546,
"eval_runtime": 4.6373,
"eval_samples_per_second": 431.283,
"eval_steps_per_second": 26.955,
"step": 550
},
{
"epoch": 0.056,
"grad_norm": 0.4390330910682678,
"learning_rate": 9.457559848219179e-05,
"loss": 2.2735,
"step": 560
},
{
"epoch": 0.057,
"grad_norm": 0.440461128950119,
"learning_rate": 9.374084606744877e-05,
"loss": 2.2337,
"step": 570
},
{
"epoch": 0.058,
"grad_norm": 0.5304872393608093,
"learning_rate": 9.292781383291611e-05,
"loss": 2.2897,
"step": 580
},
{
"epoch": 0.059,
"grad_norm": 0.4079591929912567,
"learning_rate": 9.213557589959345e-05,
"loss": 2.2064,
"step": 590
},
{
"epoch": 0.06,
"grad_norm": 0.4388200640678406,
"learning_rate": 9.136326071794409e-05,
"loss": 2.1916,
"step": 600
},
{
"epoch": 0.06,
"eval_loss": 2.2343385219573975,
"eval_runtime": 4.6153,
"eval_samples_per_second": 433.343,
"eval_steps_per_second": 27.084,
"step": 600
},
{
"epoch": 0.061,
"grad_norm": 0.4618544280529022,
"learning_rate": 9.061004703659374e-05,
"loss": 2.2159,
"step": 610
},
{
"epoch": 0.062,
"grad_norm": 0.42200735211372375,
"learning_rate": 8.987516023070193e-05,
"loss": 2.1533,
"step": 620
},
{
"epoch": 0.063,
"grad_norm": 0.5431024432182312,
"learning_rate": 8.915786895268651e-05,
"loss": 2.194,
"step": 630
},
{
"epoch": 0.064,
"grad_norm": 0.4802396595478058,
"learning_rate": 8.84574820723792e-05,
"loss": 2.1996,
"step": 640
},
{
"epoch": 0.065,
"grad_norm": 0.5224118828773499,
"learning_rate": 8.777334587751072e-05,
"loss": 2.1971,
"step": 650
},
{
"epoch": 0.065,
"eval_loss": 2.1872010231018066,
"eval_runtime": 4.6406,
"eval_samples_per_second": 430.98,
"eval_steps_per_second": 26.936,
"step": 650
},
{
"epoch": 0.066,
"grad_norm": 0.46909448504447937,
"learning_rate": 8.710484150874758e-05,
"loss": 2.1833,
"step": 660
},
{
"epoch": 0.067,
"grad_norm": 0.43358996510505676,
"learning_rate": 8.645138260640511e-05,
"loss": 2.1338,
"step": 670
},
{
"epoch": 0.068,
"grad_norm": 0.6461070775985718,
"learning_rate": 8.581241314849611e-05,
"loss": 2.0534,
"step": 680
},
{
"epoch": 0.069,
"grad_norm": 0.5167747735977173,
"learning_rate": 8.51874054619982e-05,
"loss": 2.0476,
"step": 690
},
{
"epoch": 0.07,
"grad_norm": 0.6826158165931702,
"learning_rate": 8.457585839117285e-05,
"loss": 2.1696,
"step": 700
},
{
"epoch": 0.07,
"eval_loss": 2.1201064586639404,
"eval_runtime": 4.6341,
"eval_samples_per_second": 431.585,
"eval_steps_per_second": 26.974,
"step": 700
},
{
"epoch": 0.071,
"grad_norm": 0.5946829319000244,
"learning_rate": 8.397729560848631e-05,
"loss": 2.1121,
"step": 710
},
{
"epoch": 0.072,
"grad_norm": 0.6427505612373352,
"learning_rate": 8.339126405519483e-05,
"loss": 2.0529,
"step": 720
},
{
"epoch": 0.073,
"grad_norm": 0.8502755761146545,
"learning_rate": 8.281733249999222e-05,
"loss": 2.0783,
"step": 730
},
{
"epoch": 0.074,
"grad_norm": 0.5335196852684021,
"learning_rate": 8.225509020529976e-05,
"loss": 2.0196,
"step": 740
},
{
"epoch": 0.075,
"grad_norm": 0.5112911462783813,
"learning_rate": 8.170414569182506e-05,
"loss": 1.9775,
"step": 750
},
{
"epoch": 0.075,
"eval_loss": 2.051039934158325,
"eval_runtime": 4.6427,
"eval_samples_per_second": 430.782,
"eval_steps_per_second": 26.924,
"step": 750
},
{
"epoch": 0.076,
"grad_norm": 0.6587328910827637,
"learning_rate": 8.116412559294567e-05,
"loss": 2.0011,
"step": 760
},
{
"epoch": 0.077,
"grad_norm": 0.4874884784221649,
"learning_rate": 8.063467359130037e-05,
"loss": 2.0269,
"step": 770
},
{
"epoch": 0.078,
"grad_norm": 0.5324885845184326,
"learning_rate": 8.011544943070565e-05,
"loss": 1.92,
"step": 780
},
{
"epoch": 0.079,
"grad_norm": 1.048747181892395,
"learning_rate": 7.960612799717214e-05,
"loss": 2.0112,
"step": 790
},
{
"epoch": 0.08,
"grad_norm": 0.5203439593315125,
"learning_rate": 7.910639846338163e-05,
"loss": 2.0567,
"step": 800
},
{
"epoch": 0.08,
"eval_loss": 2.01043438911438,
"eval_runtime": 4.6186,
"eval_samples_per_second": 433.034,
"eval_steps_per_second": 27.065,
"step": 800
},
{
"epoch": 0.081,
"grad_norm": 0.526197075843811,
"learning_rate": 7.861596349150974e-05,
"loss": 2.0106,
"step": 810
},
{
"epoch": 0.082,
"grad_norm": 0.5369076132774353,
"learning_rate": 7.813453848974926e-05,
"loss": 2.0475,
"step": 820
},
{
"epoch": 0.083,
"grad_norm": 0.5480849742889404,
"learning_rate": 7.766185091831061e-05,
"loss": 1.9635,
"step": 830
},
{
"epoch": 0.084,
"grad_norm": 0.5581432580947876,
"learning_rate": 7.719763964105497e-05,
"loss": 1.9332,
"step": 840
},
{
"epoch": 0.085,
"grad_norm": 0.45510992407798767,
"learning_rate": 7.674165431925523e-05,
"loss": 1.9597,
"step": 850
},
{
"epoch": 0.085,
"eval_loss": 1.9628287553787231,
"eval_runtime": 4.6152,
"eval_samples_per_second": 433.353,
"eval_steps_per_second": 27.085,
"step": 850
},
{
"epoch": 0.086,
"grad_norm": 0.5276203155517578,
"learning_rate": 7.629365484428845e-05,
"loss": 1.9356,
"step": 860
},
{
"epoch": 0.087,
"grad_norm": 0.5071442723274231,
"learning_rate": 7.585341080633831e-05,
"loss": 2.0046,
"step": 870
},
{
"epoch": 0.088,
"grad_norm": 0.4676225483417511,
"learning_rate": 7.542070099643788e-05,
"loss": 1.9443,
"step": 880
},
{
"epoch": 0.089,
"grad_norm": 0.5725594758987427,
"learning_rate": 7.499531293940736e-05,
"loss": 1.8964,
"step": 890
},
{
"epoch": 0.09,
"grad_norm": 0.5482536554336548,
"learning_rate": 7.457704245544709e-05,
"loss": 1.9238,
"step": 900
},
{
"epoch": 0.09,
"eval_loss": 1.927959680557251,
"eval_runtime": 4.609,
"eval_samples_per_second": 433.932,
"eval_steps_per_second": 27.121,
"step": 900
},
{
"epoch": 0.091,
"grad_norm": 0.4957098960876465,
"learning_rate": 7.41656932483308e-05,
"loss": 1.937,
"step": 910
},
{
"epoch": 0.092,
"grad_norm": 0.5255835652351379,
"learning_rate": 7.376107651831263e-05,
"loss": 1.955,
"step": 920
},
{
"epoch": 0.093,
"grad_norm": 0.41037118434906006,
"learning_rate": 7.336301059801394e-05,
"loss": 1.8897,
"step": 930
},
{
"epoch": 0.094,
"grad_norm": 0.6042472720146179,
"learning_rate": 7.297132060969499e-05,
"loss": 1.8852,
"step": 940
},
{
"epoch": 0.095,
"grad_norm": 0.5588452219963074,
"learning_rate": 7.258583814244268e-05,
"loss": 1.8786,
"step": 950
},
{
"epoch": 0.095,
"eval_loss": 1.892114520072937,
"eval_runtime": 4.6266,
"eval_samples_per_second": 432.278,
"eval_steps_per_second": 27.017,
"step": 950
},
{
"epoch": 0.096,
"grad_norm": 0.5261280536651611,
"learning_rate": 7.220640094792103e-05,
"loss": 1.9327,
"step": 960
},
{
"epoch": 0.097,
"grad_norm": 0.5294102430343628,
"learning_rate": 7.183285265343593e-05,
"loss": 1.925,
"step": 970
},
{
"epoch": 0.098,
"grad_norm": 0.4203556776046753,
"learning_rate": 7.14650424911616e-05,
"loss": 1.8371,
"step": 980
},
{
"epoch": 0.099,
"grad_norm": 0.5418606996536255,
"learning_rate": 7.110282504246376e-05,
"loss": 1.8636,
"step": 990
},
{
"epoch": 0.1,
"grad_norm": 0.5804753303527832,
"learning_rate": 7.074605999633481e-05,
"loss": 1.8812,
"step": 1000
},
{
"epoch": 0.1,
"eval_loss": 1.8822709321975708,
"eval_runtime": 4.6061,
"eval_samples_per_second": 434.203,
"eval_steps_per_second": 27.138,
"step": 1000
},
{
"epoch": 0.101,
"grad_norm": 0.4180801510810852,
"learning_rate": 7.03946119210298e-05,
"loss": 1.9037,
"step": 1010
},
{
"epoch": 0.102,
"grad_norm": 0.40715548396110535,
"learning_rate": 7.004835004805859e-05,
"loss": 1.8844,
"step": 1020
},
{
"epoch": 0.103,
"grad_norm": 0.6151288747787476,
"learning_rate": 6.970714806775237e-05,
"loss": 1.8651,
"step": 1030
},
{
"epoch": 0.104,
"grad_norm": 0.45094242691993713,
"learning_rate": 6.937088393567812e-05,
"loss": 1.8962,
"step": 1040
},
{
"epoch": 0.105,
"grad_norm": 0.5133697390556335,
"learning_rate": 6.903943968922749e-05,
"loss": 1.8309,
"step": 1050
},
{
"epoch": 0.105,
"eval_loss": 1.8607650995254517,
"eval_runtime": 4.6375,
"eval_samples_per_second": 431.269,
"eval_steps_per_second": 26.954,
"step": 1050
},
{
"epoch": 0.106,
"grad_norm": 0.502295196056366,
"learning_rate": 6.871270127375409e-05,
"loss": 1.8592,
"step": 1060
},
{
"epoch": 0.107,
"grad_norm": 0.46987488865852356,
"learning_rate": 6.839055837767724e-05,
"loss": 1.7896,
"step": 1070
},
{
"epoch": 0.108,
"grad_norm": 0.5071763396263123,
"learning_rate": 6.807290427601058e-05,
"loss": 1.7998,
"step": 1080
},
{
"epoch": 0.109,
"grad_norm": 0.43212854862213135,
"learning_rate": 6.775963568181182e-05,
"loss": 1.8668,
"step": 1090
},
{
"epoch": 0.11,
"grad_norm": 0.548732340335846,
"learning_rate": 6.74506526050837e-05,
"loss": 1.8052,
"step": 1100
},
{
"epoch": 0.11,
"eval_loss": 1.84357750415802,
"eval_runtime": 4.6161,
"eval_samples_per_second": 433.267,
"eval_steps_per_second": 27.079,
"step": 1100
},
{
"epoch": 0.111,
"grad_norm": 0.4961816966533661,
"learning_rate": 6.714585821868878e-05,
"loss": 1.7435,
"step": 1110
},
{
"epoch": 0.112,
"grad_norm": 0.48313409090042114,
"learning_rate": 6.68451587308695e-05,
"loss": 1.8251,
"step": 1120
},
{
"epoch": 0.113,
"grad_norm": 0.49864089488983154,
"learning_rate": 6.654846326399234e-05,
"loss": 1.828,
"step": 1130
},
{
"epoch": 0.114,
"grad_norm": 0.4988366961479187,
"learning_rate": 6.625568373916034e-05,
"loss": 1.8395,
"step": 1140
},
{
"epoch": 0.115,
"grad_norm": 0.46008503437042236,
"learning_rate": 6.596673476636102e-05,
"loss": 1.8266,
"step": 1150
},
{
"epoch": 0.115,
"eval_loss": 1.8144086599349976,
"eval_runtime": 4.6156,
"eval_samples_per_second": 433.312,
"eval_steps_per_second": 27.082,
"step": 1150
},
{
"epoch": 0.116,
"grad_norm": 0.5102156400680542,
"learning_rate": 6.568153353983866e-05,
"loss": 1.878,
"step": 1160
},
{
"epoch": 0.117,
"grad_norm": 0.4590121805667877,
"learning_rate": 6.53999997384e-05,
"loss": 1.8488,
"step": 1170
},
{
"epoch": 0.118,
"grad_norm": 0.4306875467300415,
"learning_rate": 6.512205543038029e-05,
"loss": 1.7797,
"step": 1180
},
{
"epoch": 0.119,
"grad_norm": 0.5295641422271729,
"learning_rate": 6.48476249830151e-05,
"loss": 1.7646,
"step": 1190
},
{
"epoch": 0.12,
"grad_norm": 0.4984031319618225,
"learning_rate": 6.457663497597783e-05,
"loss": 1.8432,
"step": 1200
},
{
"epoch": 0.12,
"eval_loss": 1.806372880935669,
"eval_runtime": 4.603,
"eval_samples_per_second": 434.496,
"eval_steps_per_second": 27.156,
"step": 1200
},
{
"epoch": 0.121,
"grad_norm": 0.42529740929603577,
"learning_rate": 6.430901411885911e-05,
"loss": 1.7983,
"step": 1210
},
{
"epoch": 0.122,
"grad_norm": 0.44186535477638245,
"learning_rate": 6.40446931723768e-05,
"loss": 1.7505,
"step": 1220
},
{
"epoch": 0.123,
"grad_norm": 0.5641934871673584,
"learning_rate": 6.378360487311965e-05,
"loss": 1.7354,
"step": 1230
},
{
"epoch": 0.124,
"grad_norm": 0.49620485305786133,
"learning_rate": 6.352568386163805e-05,
"loss": 1.8056,
"step": 1240
},
{
"epoch": 0.125,
"grad_norm": 0.43371838331222534,
"learning_rate": 6.327086661370808e-05,
"loss": 1.7984,
"step": 1250
},
{
"epoch": 0.125,
"eval_loss": 1.7932993173599243,
"eval_runtime": 4.6132,
"eval_samples_per_second": 433.54,
"eval_steps_per_second": 27.096,
"step": 1250
},
{
"epoch": 0.126,
"grad_norm": 0.5027583837509155,
"learning_rate": 6.301909137460409e-05,
"loss": 1.688,
"step": 1260
},
{
"epoch": 0.127,
"grad_norm": 0.5743605494499207,
"learning_rate": 6.277029809622579e-05,
"loss": 1.795,
"step": 1270
},
{
"epoch": 0.128,
"grad_norm": 0.4767725169658661,
"learning_rate": 6.252442837693433e-05,
"loss": 1.7317,
"step": 1280
},
{
"epoch": 0.129,
"grad_norm": 0.45642659068107605,
"learning_rate": 6.22814254039606e-05,
"loss": 1.7105,
"step": 1290
},
{
"epoch": 0.13,
"grad_norm": 0.5867980122566223,
"learning_rate": 6.204123389825647e-05,
"loss": 1.8047,
"step": 1300
},
{
"epoch": 0.13,
"eval_loss": 1.7906692028045654,
"eval_runtime": 6.1816,
"eval_samples_per_second": 323.54,
"eval_steps_per_second": 20.221,
"step": 1300
},
{
"epoch": 0.131,
"grad_norm": 0.4501126706600189,
"learning_rate": 6.180380006166808e-05,
"loss": 1.8556,
"step": 1310
},
{
"epoch": 0.132,
"grad_norm": 0.49103260040283203,
"learning_rate": 6.156907152631576e-05,
"loss": 1.8429,
"step": 1320
},
{
"epoch": 0.133,
"grad_norm": 0.4130401313304901,
"learning_rate": 6.133699730607301e-05,
"loss": 1.7908,
"step": 1330
},
{
"epoch": 0.134,
"grad_norm": 0.5524632334709167,
"learning_rate": 6.110752775004223e-05,
"loss": 1.7754,
"step": 1340
},
{
"epoch": 0.135,
"grad_norm": 0.4153362214565277,
"learning_rate": 6.088061449793082e-05,
"loss": 1.7705,
"step": 1350
},
{
"epoch": 0.135,
"eval_loss": 1.778767466545105,
"eval_runtime": 4.6221,
"eval_samples_per_second": 432.704,
"eval_steps_per_second": 27.044,
"step": 1350
},
{
"epoch": 0.136,
"grad_norm": 0.4490492641925812,
"learning_rate": 6.065621043723658e-05,
"loss": 1.7255,
"step": 1360
},
{
"epoch": 0.137,
"grad_norm": 0.453517884016037,
"learning_rate": 6.043426966215649e-05,
"loss": 1.7775,
"step": 1370
},
{
"epoch": 0.138,
"grad_norm": 0.48418131470680237,
"learning_rate": 6.021474743413714e-05,
"loss": 1.8089,
"step": 1380
},
{
"epoch": 0.139,
"grad_norm": 0.44093987345695496,
"learning_rate": 5.999760014399041e-05,
"loss": 1.7662,
"step": 1390
},
{
"epoch": 0.14,
"grad_norm": 0.39292681217193604,
"learning_rate": 5.978278527550084e-05,
"loss": 1.7712,
"step": 1400
},
{
"epoch": 0.14,
"eval_loss": 1.7568821907043457,
"eval_runtime": 4.613,
"eval_samples_per_second": 433.554,
"eval_steps_per_second": 27.097,
"step": 1400
},
{
"epoch": 0.141,
"grad_norm": 0.4755737781524658,
"learning_rate": 5.957026137045648e-05,
"loss": 1.761,
"step": 1410
},
{
"epoch": 0.142,
"grad_norm": 0.4488621652126312,
"learning_rate": 5.935998799503725e-05,
"loss": 1.7299,
"step": 1420
},
{
"epoch": 0.143,
"grad_norm": 0.44802042841911316,
"learning_rate": 5.91519257074994e-05,
"loss": 1.7244,
"step": 1430
},
{
"epoch": 0.144,
"grad_norm": 0.481559157371521,
"learning_rate": 5.8946036027097295e-05,
"loss": 1.7574,
"step": 1440
},
{
"epoch": 0.145,
"grad_norm": 0.39685600996017456,
"learning_rate": 5.8742281404186785e-05,
"loss": 1.7684,
"step": 1450
},
{
"epoch": 0.145,
"eval_loss": 1.7553154230117798,
"eval_runtime": 4.6273,
"eval_samples_per_second": 432.213,
"eval_steps_per_second": 27.013,
"step": 1450
},
{
"epoch": 0.146,
"grad_norm": 0.5256659984588623,
"learning_rate": 5.8540625191457576e-05,
"loss": 1.7294,
"step": 1460
},
{
"epoch": 0.147,
"grad_norm": 0.4324532449245453,
"learning_rate": 5.834103161624459e-05,
"loss": 1.678,
"step": 1470
},
{
"epoch": 0.148,
"grad_norm": 0.4632064402103424,
"learning_rate": 5.8143465753870694e-05,
"loss": 1.7198,
"step": 1480
},
{
"epoch": 0.149,
"grad_norm": 0.4310835301876068,
"learning_rate": 5.7947893501975715e-05,
"loss": 1.7345,
"step": 1490
},
{
"epoch": 0.15,
"grad_norm": 0.39495712518692017,
"learning_rate": 5.7754281555789e-05,
"loss": 1.7207,
"step": 1500
},
{
"epoch": 0.15,
"eval_loss": 1.7468315362930298,
"eval_runtime": 4.6195,
"eval_samples_per_second": 432.948,
"eval_steps_per_second": 27.059,
"step": 1500
},
{
"epoch": 0.151,
"grad_norm": 0.47850221395492554,
"learning_rate": 5.756259738430475e-05,
"loss": 1.6935,
"step": 1510
},
{
"epoch": 0.152,
"grad_norm": 0.5015422105789185,
"learning_rate": 5.7372809207321355e-05,
"loss": 1.73,
"step": 1520
},
{
"epoch": 0.153,
"grad_norm": 0.5264328718185425,
"learning_rate": 5.71848859733081e-05,
"loss": 1.7577,
"step": 1530
},
{
"epoch": 0.154,
"grad_norm": 0.4855109751224518,
"learning_rate": 5.699879733806412e-05,
"loss": 1.7081,
"step": 1540
},
{
"epoch": 0.155,
"grad_norm": 0.4310589134693146,
"learning_rate": 5.681451364413635e-05,
"loss": 1.6542,
"step": 1550
},
{
"epoch": 0.155,
"eval_loss": 1.7462912797927856,
"eval_runtime": 5.0409,
"eval_samples_per_second": 396.753,
"eval_steps_per_second": 24.797,
"step": 1550
},
{
"epoch": 0.156,
"grad_norm": 0.4904212951660156,
"learning_rate": 5.663200590096471e-05,
"loss": 1.7219,
"step": 1560
},
{
"epoch": 0.157,
"grad_norm": 0.46510931849479675,
"learning_rate": 5.645124576572452e-05,
"loss": 1.7661,
"step": 1570
},
{
"epoch": 0.158,
"grad_norm": 0.48309725522994995,
"learning_rate": 5.627220552483715e-05,
"loss": 1.7406,
"step": 1580
},
{
"epoch": 0.159,
"grad_norm": 0.4559168815612793,
"learning_rate": 5.609485807612173e-05,
"loss": 1.7269,
"step": 1590
},
{
"epoch": 0.16,
"grad_norm": 0.3912065625190735,
"learning_rate": 5.591917691156175e-05,
"loss": 1.6987,
"step": 1600
},
{
"epoch": 0.16,
"eval_loss": 1.7371795177459717,
"eval_runtime": 4.6084,
"eval_samples_per_second": 433.989,
"eval_steps_per_second": 27.124,
"step": 1600
},
{
"epoch": 0.161,
"grad_norm": 0.4040866792201996,
"learning_rate": 5.5745136100661674e-05,
"loss": 1.7261,
"step": 1610
},
{
"epoch": 0.162,
"grad_norm": 0.3926049470901489,
"learning_rate": 5.557271027436971e-05,
"loss": 1.7449,
"step": 1620
},
{
"epoch": 0.163,
"grad_norm": 0.453355997800827,
"learning_rate": 5.540187460954447e-05,
"loss": 1.7382,
"step": 1630
},
{
"epoch": 0.164,
"grad_norm": 0.41926702857017517,
"learning_rate": 5.523260481394348e-05,
"loss": 1.7363,
"step": 1640
},
{
"epoch": 0.165,
"grad_norm": 0.506650447845459,
"learning_rate": 5.506487711171322e-05,
"loss": 1.7409,
"step": 1650
},
{
"epoch": 0.165,
"eval_loss": 1.729161024093628,
"eval_runtime": 4.9479,
"eval_samples_per_second": 404.214,
"eval_steps_per_second": 25.263,
"step": 1650
},
{
"epoch": 0.166,
"grad_norm": 0.45161449909210205,
"learning_rate": 5.489866822936095e-05,
"loss": 1.7375,
"step": 1660
},
{
"epoch": 0.167,
"grad_norm": 0.42479032278060913,
"learning_rate": 5.4733955382189484e-05,
"loss": 1.6951,
"step": 1670
},
{
"epoch": 0.168,
"grad_norm": 0.40763387084007263,
"learning_rate": 5.457071626117703e-05,
"loss": 1.6684,
"step": 1680
},
{
"epoch": 0.169,
"grad_norm": 0.45459604263305664,
"learning_rate": 5.440892902028488e-05,
"loss": 1.6781,
"step": 1690
},
{
"epoch": 0.17,
"grad_norm": 0.44360917806625366,
"learning_rate": 5.424857226417659e-05,
"loss": 1.7428,
"step": 1700
},
{
"epoch": 0.17,
"eval_loss": 1.723348617553711,
"eval_runtime": 6.9404,
"eval_samples_per_second": 288.166,
"eval_steps_per_second": 18.01,
"step": 1700
},
{
"epoch": 0.171,
"grad_norm": 0.38958659768104553,
"learning_rate": 5.408962503633292e-05,
"loss": 1.7107,
"step": 1710
},
{
"epoch": 0.172,
"grad_norm": 0.5742718577384949,
"learning_rate": 5.39320668075478e-05,
"loss": 1.7336,
"step": 1720
},
{
"epoch": 0.173,
"grad_norm": 0.4499475359916687,
"learning_rate": 5.3775877464790436e-05,
"loss": 1.771,
"step": 1730
},
{
"epoch": 0.174,
"grad_norm": 0.4566386640071869,
"learning_rate": 5.362103730042052e-05,
"loss": 1.7279,
"step": 1740
},
{
"epoch": 0.175,
"grad_norm": 0.3985291123390198,
"learning_rate": 5.346752700174288e-05,
"loss": 1.6383,
"step": 1750
},
{
"epoch": 0.175,
"eval_loss": 1.7142506837844849,
"eval_runtime": 4.639,
"eval_samples_per_second": 431.132,
"eval_steps_per_second": 26.946,
"step": 1750
},
{
"epoch": 0.176,
"grad_norm": 0.48121654987335205,
"learning_rate": 5.331532764088928e-05,
"loss": 1.7232,
"step": 1760
},
{
"epoch": 0.177,
"grad_norm": 0.44289764761924744,
"learning_rate": 5.316442066501519e-05,
"loss": 1.7705,
"step": 1770
},
{
"epoch": 0.178,
"grad_norm": 0.39872604608535767,
"learning_rate": 5.30147878868001e-05,
"loss": 1.7136,
"step": 1780
},
{
"epoch": 0.179,
"grad_norm": 0.428268700838089,
"learning_rate": 5.2866411475240354e-05,
"loss": 1.658,
"step": 1790
},
{
"epoch": 0.18,
"grad_norm": 0.5536085963249207,
"learning_rate": 5.2719273946723746e-05,
"loss": 1.6961,
"step": 1800
},
{
"epoch": 0.18,
"eval_loss": 1.719687581062317,
"eval_runtime": 4.6069,
"eval_samples_per_second": 434.128,
"eval_steps_per_second": 27.133,
"step": 1800
},
{
"epoch": 0.181,
"grad_norm": 0.4429076611995697,
"learning_rate": 5.257335815637598e-05,
"loss": 1.7568,
"step": 1810
},
{
"epoch": 0.182,
"grad_norm": 0.4638884961605072,
"learning_rate": 5.242864728966902e-05,
"loss": 1.7089,
"step": 1820
},
{
"epoch": 0.183,
"grad_norm": 0.48520177602767944,
"learning_rate": 5.2285124854282266e-05,
"loss": 1.6566,
"step": 1830
},
{
"epoch": 0.184,
"grad_norm": 0.4005000591278076,
"learning_rate": 5.2142774672207326e-05,
"loss": 1.6203,
"step": 1840
},
{
"epoch": 0.185,
"grad_norm": 0.3970281183719635,
"learning_rate": 5.200158087208814e-05,
"loss": 1.6356,
"step": 1850
},
{
"epoch": 0.185,
"eval_loss": 1.704606056213379,
"eval_runtime": 4.6253,
"eval_samples_per_second": 432.402,
"eval_steps_per_second": 27.025,
"step": 1850
},
{
"epoch": 0.186,
"grad_norm": 0.4269203245639801,
"learning_rate": 5.186152788178785e-05,
"loss": 1.6598,
"step": 1860
},
{
"epoch": 0.187,
"grad_norm": 0.3638952672481537,
"learning_rate": 5.172260042117486e-05,
"loss": 1.6282,
"step": 1870
},
{
"epoch": 0.188,
"grad_norm": 0.38531213998794556,
"learning_rate": 5.1584783495120195e-05,
"loss": 1.6547,
"step": 1880
},
{
"epoch": 0.189,
"grad_norm": 0.4625402092933655,
"learning_rate": 5.1448062386699125e-05,
"loss": 1.6738,
"step": 1890
},
{
"epoch": 0.19,
"grad_norm": 0.4621464014053345,
"learning_rate": 5.1312422650589934e-05,
"loss": 1.6875,
"step": 1900
},
{
"epoch": 0.19,
"eval_loss": 1.698980689048767,
"eval_runtime": 4.6172,
"eval_samples_per_second": 433.159,
"eval_steps_per_second": 27.072,
"step": 1900
},
{
"epoch": 0.191,
"grad_norm": 0.44431203603744507,
"learning_rate": 5.117785010666307e-05,
"loss": 1.6827,
"step": 1910
},
{
"epoch": 0.192,
"grad_norm": 0.4711743891239166,
"learning_rate": 5.104433083375434e-05,
"loss": 1.7471,
"step": 1920
},
{
"epoch": 0.193,
"grad_norm": 0.42140287160873413,
"learning_rate": 5.091185116361582e-05,
"loss": 1.724,
"step": 1930
},
{
"epoch": 0.194,
"grad_norm": 0.4742933213710785,
"learning_rate": 5.0780397675038636e-05,
"loss": 1.7009,
"step": 1940
},
{
"epoch": 0.195,
"grad_norm": 0.42618677020072937,
"learning_rate": 5.0649957188141786e-05,
"loss": 1.6447,
"step": 1950
},
{
"epoch": 0.195,
"eval_loss": 1.696645975112915,
"eval_runtime": 4.6168,
"eval_samples_per_second": 433.201,
"eval_steps_per_second": 27.075,
"step": 1950
},
{
"epoch": 0.196,
"grad_norm": 0.4423786997795105,
"learning_rate": 5.052051675882142e-05,
"loss": 1.6871,
"step": 1960
},
{
"epoch": 0.197,
"grad_norm": 0.45436352491378784,
"learning_rate": 5.0392063673355584e-05,
"loss": 1.6682,
"step": 1970
},
{
"epoch": 0.198,
"grad_norm": 0.43426281213760376,
"learning_rate": 5.026458544315881e-05,
"loss": 1.6709,
"step": 1980
},
{
"epoch": 0.199,
"grad_norm": 0.4324936270713806,
"learning_rate": 5.013806979968224e-05,
"loss": 1.6365,
"step": 1990
},
{
"epoch": 0.2,
"grad_norm": 0.4487122893333435,
"learning_rate": 5.0012504689453974e-05,
"loss": 1.6461,
"step": 2000
},
{
"epoch": 0.2,
"eval_loss": 1.6922798156738281,
"eval_runtime": 4.5917,
"eval_samples_per_second": 435.564,
"eval_steps_per_second": 27.223,
"step": 2000
},
{
"epoch": 0.201,
"grad_norm": 0.42944806814193726,
"learning_rate": 4.988787826925559e-05,
"loss": 1.6658,
"step": 2010
},
{
"epoch": 0.202,
"grad_norm": 0.5247456431388855,
"learning_rate": 4.9764178901430006e-05,
"loss": 1.6671,
"step": 2020
},
{
"epoch": 0.203,
"grad_norm": 0.4485164284706116,
"learning_rate": 4.9641395149316935e-05,
"loss": 1.6854,
"step": 2030
},
{
"epoch": 0.204,
"grad_norm": 0.4638156592845917,
"learning_rate": 4.951951577281135e-05,
"loss": 1.7201,
"step": 2040
},
{
"epoch": 0.205,
"grad_norm": 0.42195677757263184,
"learning_rate": 4.939852972404162e-05,
"loss": 1.6711,
"step": 2050
},
{
"epoch": 0.205,
"eval_loss": 1.6950935125350952,
"eval_runtime": 4.6116,
"eval_samples_per_second": 433.685,
"eval_steps_per_second": 27.105,
"step": 2050
},
{
"epoch": 0.206,
"grad_norm": 0.3974883556365967,
"learning_rate": 4.927842614316289e-05,
"loss": 1.638,
"step": 2060
},
{
"epoch": 0.207,
"grad_norm": 0.4347898066043854,
"learning_rate": 4.9159194354262706e-05,
"loss": 1.6931,
"step": 2070
},
{
"epoch": 0.208,
"grad_norm": 0.4808084964752197,
"learning_rate": 4.904082386137498e-05,
"loss": 1.725,
"step": 2080
},
{
"epoch": 0.209,
"grad_norm": 0.48091718554496765,
"learning_rate": 4.892330434459896e-05,
"loss": 1.7049,
"step": 2090
},
{
"epoch": 0.21,
"grad_norm": 0.4456811249256134,
"learning_rate": 4.880662565632016e-05,
"loss": 1.6487,
"step": 2100
},
{
"epoch": 0.21,
"eval_loss": 1.688322901725769,
"eval_runtime": 4.5926,
"eval_samples_per_second": 435.484,
"eval_steps_per_second": 27.218,
"step": 2100
},
{
"epoch": 0.211,
"grad_norm": 0.43593478202819824,
"learning_rate": 4.869077781752983e-05,
"loss": 1.6993,
"step": 2110
},
{
"epoch": 0.212,
"grad_norm": 0.4544734060764313,
"learning_rate": 4.857575101424013e-05,
"loss": 1.6894,
"step": 2120
},
{
"epoch": 0.213,
"grad_norm": 0.42114976048469543,
"learning_rate": 4.846153559399206e-05,
"loss": 1.6745,
"step": 2130
},
{
"epoch": 0.214,
"grad_norm": 0.4465537369251251,
"learning_rate": 4.834812206245318e-05,
"loss": 1.6264,
"step": 2140
},
{
"epoch": 0.215,
"grad_norm": 0.4641367793083191,
"learning_rate": 4.8235501080102624e-05,
"loss": 1.7031,
"step": 2150
},
{
"epoch": 0.215,
"eval_loss": 1.6830954551696777,
"eval_runtime": 4.625,
"eval_samples_per_second": 432.43,
"eval_steps_per_second": 27.027,
"step": 2150
},
{
"epoch": 0.216,
"grad_norm": 0.4330129027366638,
"learning_rate": 4.812366345900056e-05,
"loss": 1.6766,
"step": 2160
},
{
"epoch": 0.217,
"grad_norm": 0.4278458058834076,
"learning_rate": 4.801260015963979e-05,
"loss": 1.6989,
"step": 2170
},
{
"epoch": 0.218,
"grad_norm": 0.4446634352207184,
"learning_rate": 4.790230228787671e-05,
"loss": 1.6944,
"step": 2180
},
{
"epoch": 0.219,
"grad_norm": 0.4040505588054657,
"learning_rate": 4.779276109193975e-05,
"loss": 1.6661,
"step": 2190
},
{
"epoch": 0.22,
"grad_norm": 0.45129111409187317,
"learning_rate": 4.7683967959512366e-05,
"loss": 1.6758,
"step": 2200
},
{
"epoch": 0.22,
"eval_loss": 1.6761962175369263,
"eval_runtime": 4.6057,
"eval_samples_per_second": 434.246,
"eval_steps_per_second": 27.14,
"step": 2200
},
{
"epoch": 0.221,
"grad_norm": 0.4348917007446289,
"learning_rate": 4.757591441488914e-05,
"loss": 1.6307,
"step": 2210
},
{
"epoch": 0.222,
"grad_norm": 0.4746496081352234,
"learning_rate": 4.746859211620213e-05,
"loss": 1.6654,
"step": 2220
},
{
"epoch": 0.223,
"grad_norm": 0.5951523184776306,
"learning_rate": 4.7361992852715864e-05,
"loss": 1.6478,
"step": 2230
},
{
"epoch": 0.224,
"grad_norm": 0.4033101797103882,
"learning_rate": 4.7256108542188905e-05,
"loss": 1.6441,
"step": 2240
},
{
"epoch": 0.225,
"grad_norm": 0.45017239451408386,
"learning_rate": 4.715093122829988e-05,
"loss": 1.6143,
"step": 2250
},
{
"epoch": 0.225,
"eval_loss": 1.6731691360473633,
"eval_runtime": 4.6295,
"eval_samples_per_second": 432.014,
"eval_steps_per_second": 27.001,
"step": 2250
},
{
"epoch": 0.226,
"grad_norm": 0.4218330383300781,
"learning_rate": 4.704645307813639e-05,
"loss": 1.6687,
"step": 2260
},
{
"epoch": 0.227,
"grad_norm": 0.41807785630226135,
"learning_rate": 4.6942666379744746e-05,
"loss": 1.6473,
"step": 2270
},
{
"epoch": 0.228,
"grad_norm": 0.44153642654418945,
"learning_rate": 4.683956353973895e-05,
"loss": 1.634,
"step": 2280
},
{
"epoch": 0.229,
"grad_norm": 0.4353524446487427,
"learning_rate": 4.6737137080967207e-05,
"loss": 1.6452,
"step": 2290
},
{
"epoch": 0.23,
"grad_norm": 0.47118180990219116,
"learning_rate": 4.663537964023428e-05,
"loss": 1.657,
"step": 2300
},
{
"epoch": 0.23,
"eval_loss": 1.6700432300567627,
"eval_runtime": 4.626,
"eval_samples_per_second": 432.343,
"eval_steps_per_second": 27.021,
"step": 2300
},
{
"epoch": 0.231,
"grad_norm": 0.41519850492477417,
"learning_rate": 4.653428396607817e-05,
"loss": 1.6589,
"step": 2310
},
{
"epoch": 0.232,
"grad_norm": 0.5066779255867004,
"learning_rate": 4.643384291659964e-05,
"loss": 1.6592,
"step": 2320
},
{
"epoch": 0.233,
"grad_norm": 0.4837518632411957,
"learning_rate": 4.6334049457342925e-05,
"loss": 1.6744,
"step": 2330
},
{
"epoch": 0.234,
"grad_norm": 0.43611717224121094,
"learning_rate": 4.623489665922651e-05,
"loss": 1.6966,
"step": 2340
},
{
"epoch": 0.235,
"grad_norm": 0.4694697856903076,
"learning_rate": 4.613637769652221e-05,
"loss": 1.6805,
"step": 2350
},
{
"epoch": 0.235,
"eval_loss": 1.678394079208374,
"eval_runtime": 4.6355,
"eval_samples_per_second": 431.454,
"eval_steps_per_second": 26.966,
"step": 2350
},
{
"epoch": 0.236,
"grad_norm": 0.38386818766593933,
"learning_rate": 4.603848584488156e-05,
"loss": 1.666,
"step": 2360
},
{
"epoch": 0.237,
"grad_norm": 0.4271336495876312,
"learning_rate": 4.594121447940805e-05,
"loss": 1.6365,
"step": 2370
},
{
"epoch": 0.238,
"grad_norm": 0.37166059017181396,
"learning_rate": 4.58445570727739e-05,
"loss": 1.6179,
"step": 2380
},
{
"epoch": 0.239,
"grad_norm": 0.40135860443115234,
"learning_rate": 4.574850719338033e-05,
"loss": 1.6805,
"step": 2390
},
{
"epoch": 0.24,
"grad_norm": 0.4425022602081299,
"learning_rate": 4.5653058503559986e-05,
"loss": 1.6637,
"step": 2400
},
{
"epoch": 0.24,
"eval_loss": 1.6743378639221191,
"eval_runtime": 4.6179,
"eval_samples_per_second": 433.097,
"eval_steps_per_second": 27.069,
"step": 2400
},
{
"epoch": 0.241,
"grad_norm": 0.4733668863773346,
"learning_rate": 4.555820475782052e-05,
"loss": 1.6514,
"step": 2410
},
{
"epoch": 0.242,
"grad_norm": 0.4362589716911316,
"learning_rate": 4.5463939801128054e-05,
"loss": 1.6338,
"step": 2420
},
{
"epoch": 0.243,
"grad_norm": 0.3984014391899109,
"learning_rate": 4.537025756722961e-05,
"loss": 1.5926,
"step": 2430
},
{
"epoch": 0.244,
"grad_norm": 0.38249659538269043,
"learning_rate": 4.527715207701344e-05,
"loss": 1.652,
"step": 2440
},
{
"epoch": 0.245,
"grad_norm": 0.3849375247955322,
"learning_rate": 4.51846174369061e-05,
"loss": 1.6031,
"step": 2450
},
{
"epoch": 0.245,
"eval_loss": 1.6682366132736206,
"eval_runtime": 4.6479,
"eval_samples_per_second": 430.304,
"eval_steps_per_second": 26.894,
"step": 2450
},
{
"epoch": 0.246,
"grad_norm": 0.4070509672164917,
"learning_rate": 4.5092647837305585e-05,
"loss": 1.6779,
"step": 2460
},
{
"epoch": 0.247,
"grad_norm": 0.3720800578594208,
"learning_rate": 4.500123755104922e-05,
"loss": 1.6363,
"step": 2470
},
{
"epoch": 0.248,
"grad_norm": 0.4045543074607849,
"learning_rate": 4.4910380931915706e-05,
"loss": 1.5965,
"step": 2480
},
{
"epoch": 0.249,
"grad_norm": 0.49813225865364075,
"learning_rate": 4.4820072413160295e-05,
"loss": 1.6665,
"step": 2490
},
{
"epoch": 0.25,
"grad_norm": 0.425383985042572,
"learning_rate": 4.4730306506082105e-05,
"loss": 1.6569,
"step": 2500
},
{
"epoch": 0.25,
"eval_loss": 1.6610209941864014,
"eval_runtime": 4.6252,
"eval_samples_per_second": 432.417,
"eval_steps_per_second": 27.026,
"step": 2500
},
{
"epoch": 0.251,
"grad_norm": 0.4529320001602173,
"learning_rate": 4.4641077798623075e-05,
"loss": 1.676,
"step": 2510
},
{
"epoch": 0.252,
"grad_norm": 0.44008973240852356,
"learning_rate": 4.455238095399737e-05,
"loss": 1.6743,
"step": 2520
},
{
"epoch": 0.253,
"grad_norm": 0.45680171251296997,
"learning_rate": 4.446421070935069e-05,
"loss": 1.6867,
"step": 2530
},
{
"epoch": 0.254,
"grad_norm": 0.42524072527885437,
"learning_rate": 4.437656187444866e-05,
"loss": 1.6318,
"step": 2540
},
{
"epoch": 0.255,
"grad_norm": 0.48738694190979004,
"learning_rate": 4.428942933039357e-05,
"loss": 1.6515,
"step": 2550
},
{
"epoch": 0.255,
"eval_loss": 1.6626614332199097,
"eval_runtime": 4.6447,
"eval_samples_per_second": 430.596,
"eval_steps_per_second": 26.912,
"step": 2550
},
{
"epoch": 0.256,
"grad_norm": 0.4267066419124603,
"learning_rate": 4.420280802836862e-05,
"loss": 1.6502,
"step": 2560
},
{
"epoch": 0.257,
"grad_norm": 0.37836384773254395,
"learning_rate": 4.4116692988409206e-05,
"loss": 1.5564,
"step": 2570
},
{
"epoch": 0.258,
"grad_norm": 0.384313702583313,
"learning_rate": 4.4031079298200316e-05,
"loss": 1.6081,
"step": 2580
},
{
"epoch": 0.259,
"grad_norm": 0.42795801162719727,
"learning_rate": 4.394596211189963e-05,
"loss": 1.6417,
"step": 2590
},
{
"epoch": 0.26,
"grad_norm": 0.51594477891922,
"learning_rate": 4.3861336648985394e-05,
"loss": 1.6494,
"step": 2600
},
{
"epoch": 0.26,
"eval_loss": 1.6588302850723267,
"eval_runtime": 4.6233,
"eval_samples_per_second": 432.588,
"eval_steps_per_second": 27.037,
"step": 2600
},
{
"epoch": 0.261,
"grad_norm": 0.42120248079299927,
"learning_rate": 4.3777198193128756e-05,
"loss": 1.6644,
"step": 2610
},
{
"epoch": 0.262,
"grad_norm": 0.4563160240650177,
"learning_rate": 4.369354209108969e-05,
"loss": 1.6758,
"step": 2620
},
{
"epoch": 0.263,
"grad_norm": 0.488189160823822,
"learning_rate": 4.3610363751636137e-05,
"loss": 1.6199,
"step": 2630
},
{
"epoch": 0.264,
"grad_norm": 0.44422969222068787,
"learning_rate": 4.352765864448559e-05,
"loss": 1.6438,
"step": 2640
},
{
"epoch": 0.265,
"grad_norm": 0.47733762860298157,
"learning_rate": 4.344542229926874e-05,
"loss": 1.581,
"step": 2650
},
{
"epoch": 0.265,
"eval_loss": 1.6608359813690186,
"eval_runtime": 4.628,
"eval_samples_per_second": 432.154,
"eval_steps_per_second": 27.01,
"step": 2650
},
{
"epoch": 0.266,
"grad_norm": 0.41654443740844727,
"learning_rate": 4.336365030451462e-05,
"loss": 1.6328,
"step": 2660
},
{
"epoch": 0.267,
"grad_norm": 0.3549344539642334,
"learning_rate": 4.3282338306656564e-05,
"loss": 1.5871,
"step": 2670
},
{
"epoch": 0.268,
"grad_norm": 0.36303457617759705,
"learning_rate": 4.32014820090587e-05,
"loss": 1.6032,
"step": 2680
},
{
"epoch": 0.269,
"grad_norm": 0.415372759103775,
"learning_rate": 4.312107717106231e-05,
"loss": 1.6015,
"step": 2690
},
{
"epoch": 0.27,
"grad_norm": 0.3588610291481018,
"learning_rate": 4.30411196070517e-05,
"loss": 1.615,
"step": 2700
},
{
"epoch": 0.27,
"eval_loss": 1.6555571556091309,
"eval_runtime": 4.6266,
"eval_samples_per_second": 432.287,
"eval_steps_per_second": 27.018,
"step": 2700
},
{
"epoch": 0.271,
"grad_norm": 0.47867655754089355,
"learning_rate": 4.296160518553892e-05,
"loss": 1.6321,
"step": 2710
},
{
"epoch": 0.272,
"grad_norm": 0.4255579710006714,
"learning_rate": 4.288252982826728e-05,
"loss": 1.5484,
"step": 2720
},
{
"epoch": 0.273,
"grad_norm": 0.41540971398353577,
"learning_rate": 4.2803889509332595e-05,
"loss": 1.6357,
"step": 2730
},
{
"epoch": 0.274,
"grad_norm": 0.41569575667381287,
"learning_rate": 4.2725680254322424e-05,
"loss": 1.6814,
"step": 2740
},
{
"epoch": 0.275,
"grad_norm": 0.40866607427597046,
"learning_rate": 4.2647898139472306e-05,
"loss": 1.621,
"step": 2750
},
{
"epoch": 0.275,
"eval_loss": 1.6504429578781128,
"eval_runtime": 4.639,
"eval_samples_per_second": 431.128,
"eval_steps_per_second": 26.945,
"step": 2750
},
{
"epoch": 0.276,
"grad_norm": 0.3872694969177246,
"learning_rate": 4.2570539290839e-05,
"loss": 1.6261,
"step": 2760
},
{
"epoch": 0.277,
"grad_norm": 0.40379494428634644,
"learning_rate": 4.2493599883490024e-05,
"loss": 1.6705,
"step": 2770
},
{
"epoch": 0.278,
"grad_norm": 0.35201510787010193,
"learning_rate": 4.241707614070937e-05,
"loss": 1.648,
"step": 2780
},
{
"epoch": 0.279,
"grad_norm": 0.41881263256073,
"learning_rate": 4.234096433321879e-05,
"loss": 1.5774,
"step": 2790
},
{
"epoch": 0.28,
"grad_norm": 0.4631783366203308,
"learning_rate": 4.226526077841448e-05,
"loss": 1.6091,
"step": 2800
},
{
"epoch": 0.28,
"eval_loss": 1.654015064239502,
"eval_runtime": 4.615,
"eval_samples_per_second": 433.373,
"eval_steps_per_second": 27.086,
"step": 2800
},
{
"epoch": 0.281,
"grad_norm": 0.4018985629081726,
"learning_rate": 4.218996183961868e-05,
"loss": 1.6026,
"step": 2810
},
{
"epoch": 0.282,
"grad_norm": 0.36099082231521606,
"learning_rate": 4.2115063925345885e-05,
"loss": 1.645,
"step": 2820
},
{
"epoch": 0.283,
"grad_norm": 0.4079434275627136,
"learning_rate": 4.204056348858339e-05,
"loss": 1.6135,
"step": 2830
},
{
"epoch": 0.284,
"grad_norm": 0.4359963536262512,
"learning_rate": 4.196645702608569e-05,
"loss": 1.6012,
"step": 2840
},
{
"epoch": 0.285,
"grad_norm": 0.459576815366745,
"learning_rate": 4.18927410776826e-05,
"loss": 1.6173,
"step": 2850
},
{
"epoch": 0.285,
"eval_loss": 1.6534111499786377,
"eval_runtime": 4.6296,
"eval_samples_per_second": 432.004,
"eval_steps_per_second": 27.0,
"step": 2850
},
{
"epoch": 0.286,
"grad_norm": 0.43706053495407104,
"learning_rate": 4.181941222560067e-05,
"loss": 1.6155,
"step": 2860
},
{
"epoch": 0.287,
"grad_norm": 0.3740486204624176,
"learning_rate": 4.1746467093797576e-05,
"loss": 1.5868,
"step": 2870
},
{
"epoch": 0.288,
"grad_norm": 0.4161520302295685,
"learning_rate": 4.167390234730933e-05,
"loss": 1.5994,
"step": 2880
},
{
"epoch": 0.289,
"grad_norm": 0.38721993565559387,
"learning_rate": 4.1601714691609825e-05,
"loss": 1.6165,
"step": 2890
},
{
"epoch": 0.29,
"grad_norm": 0.42869701981544495,
"learning_rate": 4.152990087198261e-05,
"loss": 1.6065,
"step": 2900
},
{
"epoch": 0.29,
"eval_loss": 1.6540226936340332,
"eval_runtime": 4.613,
"eval_samples_per_second": 433.558,
"eval_steps_per_second": 27.097,
"step": 2900
},
{
"epoch": 0.291,
"grad_norm": 0.38152533769607544,
"learning_rate": 4.145845767290457e-05,
"loss": 1.6351,
"step": 2910
},
{
"epoch": 0.292,
"grad_norm": 0.5534144043922424,
"learning_rate": 4.1387381917441095e-05,
"loss": 1.6169,
"step": 2920
},
{
"epoch": 0.293,
"grad_norm": 0.4238661527633667,
"learning_rate": 4.131667046665284e-05,
"loss": 1.6155,
"step": 2930
},
{
"epoch": 0.294,
"grad_norm": 0.44289034605026245,
"learning_rate": 4.1246320219013365e-05,
"loss": 1.5717,
"step": 2940
},
{
"epoch": 0.295,
"grad_norm": 0.3852718770503998,
"learning_rate": 4.117632810983782e-05,
"loss": 1.6337,
"step": 2950
},
{
"epoch": 0.295,
"eval_loss": 1.6513147354125977,
"eval_runtime": 4.6328,
"eval_samples_per_second": 431.708,
"eval_steps_per_second": 26.982,
"step": 2950
},
{
"epoch": 0.296,
"grad_norm": 0.3397306501865387,
"learning_rate": 4.1106691110722134e-05,
"loss": 1.6018,
"step": 2960
},
{
"epoch": 0.297,
"grad_norm": 0.35091128945350647,
"learning_rate": 4.10374062289927e-05,
"loss": 1.6031,
"step": 2970
},
{
"epoch": 0.298,
"grad_norm": 0.43097996711730957,
"learning_rate": 4.096847050716615e-05,
"loss": 1.5879,
"step": 2980
},
{
"epoch": 0.299,
"grad_norm": 0.41129791736602783,
"learning_rate": 4.089988102241916e-05,
"loss": 1.6094,
"step": 2990
},
{
"epoch": 0.3,
"grad_norm": 0.42959263920783997,
"learning_rate": 4.083163488606789e-05,
"loss": 1.5706,
"step": 3000
},
{
"epoch": 0.3,
"eval_loss": 1.6442506313323975,
"eval_runtime": 4.616,
"eval_samples_per_second": 433.279,
"eval_steps_per_second": 27.08,
"step": 3000
},
{
"epoch": 0.301,
"grad_norm": 0.41272443532943726,
"learning_rate": 4.076372924305703e-05,
"loss": 1.5555,
"step": 3010
},
{
"epoch": 0.302,
"grad_norm": 0.402739942073822,
"learning_rate": 4.0696161271458113e-05,
"loss": 1.634,
"step": 3020
},
{
"epoch": 0.303,
"grad_norm": 0.44482648372650146,
"learning_rate": 4.0628928181976975e-05,
"loss": 1.5485,
"step": 3030
},
{
"epoch": 0.304,
"grad_norm": 0.4939306676387787,
"learning_rate": 4.0562027217470115e-05,
"loss": 1.5673,
"step": 3040
},
{
"epoch": 0.305,
"grad_norm": 0.5246641635894775,
"learning_rate": 4.0495455652469793e-05,
"loss": 1.6093,
"step": 3050
},
{
"epoch": 0.305,
"eval_loss": 1.6460926532745361,
"eval_runtime": 4.6264,
"eval_samples_per_second": 432.302,
"eval_steps_per_second": 27.019,
"step": 3050
},
{
"epoch": 0.306,
"grad_norm": 0.4141904413700104,
"learning_rate": 4.0429210792717695e-05,
"loss": 1.6173,
"step": 3060
},
{
"epoch": 0.307,
"grad_norm": 0.3761964738368988,
"learning_rate": 4.03632899747069e-05,
"loss": 1.6303,
"step": 3070
},
{
"epoch": 0.308,
"grad_norm": 0.3574119806289673,
"learning_rate": 4.0297690565232124e-05,
"loss": 1.5726,
"step": 3080
},
{
"epoch": 0.309,
"grad_norm": 0.4022608995437622,
"learning_rate": 4.0232409960947924e-05,
"loss": 1.6603,
"step": 3090
},
{
"epoch": 0.31,
"grad_norm": 0.43622887134552,
"learning_rate": 4.0167445587934724e-05,
"loss": 1.6398,
"step": 3100
},
{
"epoch": 0.31,
"eval_loss": 1.6402058601379395,
"eval_runtime": 4.6099,
"eval_samples_per_second": 433.849,
"eval_steps_per_second": 27.116,
"step": 3100
},
{
"epoch": 0.311,
"grad_norm": 0.4243031442165375,
"learning_rate": 4.0102794901272596e-05,
"loss": 1.5742,
"step": 3110
},
{
"epoch": 0.312,
"grad_norm": 0.4194463789463043,
"learning_rate": 4.00384553846225e-05,
"loss": 1.6243,
"step": 3120
},
{
"epoch": 0.313,
"grad_norm": 0.3699721693992615,
"learning_rate": 3.997442454981493e-05,
"loss": 1.6164,
"step": 3130
},
{
"epoch": 0.314,
"grad_norm": 0.4038933217525482,
"learning_rate": 3.991069993644577e-05,
"loss": 1.6501,
"step": 3140
},
{
"epoch": 0.315,
"grad_norm": 0.44397690892219543,
"learning_rate": 3.984727911147916e-05,
"loss": 1.6439,
"step": 3150
},
{
"epoch": 0.315,
"eval_loss": 1.6468923091888428,
"eval_runtime": 4.6362,
"eval_samples_per_second": 431.391,
"eval_steps_per_second": 26.962,
"step": 3150
},
{
"epoch": 0.316,
"grad_norm": 0.4412182569503784,
"learning_rate": 3.978415966885739e-05,
"loss": 1.5617,
"step": 3160
},
{
"epoch": 0.317,
"grad_norm": 0.430493026971817,
"learning_rate": 3.972133922911742e-05,
"loss": 1.6315,
"step": 3170
},
{
"epoch": 0.318,
"grad_norm": 0.44671764969825745,
"learning_rate": 3.9658815439014244e-05,
"loss": 1.6522,
"step": 3180
},
{
"epoch": 0.319,
"grad_norm": 0.39204543828964233,
"learning_rate": 3.959658597115049e-05,
"loss": 1.5415,
"step": 3190
},
{
"epoch": 0.32,
"grad_norm": 0.38203272223472595,
"learning_rate": 3.953464852361267e-05,
"loss": 1.5773,
"step": 3200
},
{
"epoch": 0.32,
"eval_loss": 1.6403597593307495,
"eval_runtime": 4.6223,
"eval_samples_per_second": 432.689,
"eval_steps_per_second": 27.043,
"step": 3200
},
{
"epoch": 0.321,
"grad_norm": 0.46437588334083557,
"learning_rate": 3.9473000819613454e-05,
"loss": 1.6081,
"step": 3210
},
{
"epoch": 0.322,
"grad_norm": 0.38332080841064453,
"learning_rate": 3.941164060714018e-05,
"loss": 1.5312,
"step": 3220
},
{
"epoch": 0.323,
"grad_norm": 0.41572943329811096,
"learning_rate": 3.9350565658609336e-05,
"loss": 1.5595,
"step": 3230
},
{
"epoch": 0.324,
"grad_norm": 0.3750537931919098,
"learning_rate": 3.928977377052684e-05,
"loss": 1.626,
"step": 3240
},
{
"epoch": 0.325,
"grad_norm": 0.5018735527992249,
"learning_rate": 3.922926276315421e-05,
"loss": 1.6085,
"step": 3250
},
{
"epoch": 0.325,
"eval_loss": 1.6348525285720825,
"eval_runtime": 4.6293,
"eval_samples_per_second": 432.031,
"eval_steps_per_second": 27.002,
"step": 3250
},
{
"epoch": 0.326,
"grad_norm": 0.3808193802833557,
"learning_rate": 3.916903048018023e-05,
"loss": 1.6246,
"step": 3260
},
{
"epoch": 0.327,
"grad_norm": 0.34066709876060486,
"learning_rate": 3.910907478839825e-05,
"loss": 1.5548,
"step": 3270
},
{
"epoch": 0.328,
"grad_norm": 0.38990190625190735,
"learning_rate": 3.904939357738885e-05,
"loss": 1.5419,
"step": 3280
},
{
"epoch": 0.329,
"grad_norm": 0.45460209250450134,
"learning_rate": 3.8989984759207835e-05,
"loss": 1.5897,
"step": 3290
},
{
"epoch": 0.33,
"grad_norm": 0.3932059407234192,
"learning_rate": 3.893084626807942e-05,
"loss": 1.6439,
"step": 3300
},
{
"epoch": 0.33,
"eval_loss": 1.6305333375930786,
"eval_runtime": 4.6169,
"eval_samples_per_second": 433.188,
"eval_steps_per_second": 27.074,
"step": 3300
},
{
"epoch": 0.331,
"grad_norm": 0.3681378662586212,
"learning_rate": 3.887197606009451e-05,
"loss": 1.6234,
"step": 3310
},
{
"epoch": 0.332,
"grad_norm": 0.4306110739707947,
"learning_rate": 3.881337211291406e-05,
"loss": 1.5888,
"step": 3320
},
{
"epoch": 0.333,
"grad_norm": 0.43308112025260925,
"learning_rate": 3.875503242547721e-05,
"loss": 1.6162,
"step": 3330
},
{
"epoch": 0.334,
"grad_norm": 0.3679511547088623,
"learning_rate": 3.869695501771434e-05,
"loss": 1.5796,
"step": 3340
},
{
"epoch": 0.335,
"grad_norm": 0.4424503743648529,
"learning_rate": 3.8639137930264805e-05,
"loss": 1.591,
"step": 3350
},
{
"epoch": 0.335,
"eval_loss": 1.6320486068725586,
"eval_runtime": 4.6344,
"eval_samples_per_second": 431.558,
"eval_steps_per_second": 26.972,
"step": 3350
},
{
"epoch": 0.336,
"grad_norm": 0.4127017557621002,
"learning_rate": 3.858157922419927e-05,
"loss": 1.6142,
"step": 3360
},
{
"epoch": 0.337,
"grad_norm": 0.516035795211792,
"learning_rate": 3.8524276980746626e-05,
"loss": 1.6361,
"step": 3370
},
{
"epoch": 0.338,
"grad_norm": 0.35583969950675964,
"learning_rate": 3.8467229301025334e-05,
"loss": 1.6123,
"step": 3380
},
{
"epoch": 0.339,
"grad_norm": 0.40070992708206177,
"learning_rate": 3.8410434305779104e-05,
"loss": 1.5668,
"step": 3390
},
{
"epoch": 0.34,
"grad_norm": 0.4105798900127411,
"learning_rate": 3.835389013511689e-05,
"loss": 1.5962,
"step": 3400
},
{
"epoch": 0.34,
"eval_loss": 1.629593014717102,
"eval_runtime": 4.6284,
"eval_samples_per_second": 432.113,
"eval_steps_per_second": 27.007,
"step": 3400
},
{
"epoch": 0.341,
"grad_norm": 0.4205196797847748,
"learning_rate": 3.829759494825711e-05,
"loss": 1.5301,
"step": 3410
},
{
"epoch": 0.342,
"grad_norm": 0.39605259895324707,
"learning_rate": 3.82415469232759e-05,
"loss": 1.6035,
"step": 3420
},
{
"epoch": 0.343,
"grad_norm": 0.3812793493270874,
"learning_rate": 3.8185744256859485e-05,
"loss": 1.6069,
"step": 3430
},
{
"epoch": 0.344,
"grad_norm": 0.3666427433490753,
"learning_rate": 3.813018516406046e-05,
"loss": 1.5852,
"step": 3440
},
{
"epoch": 0.345,
"grad_norm": 0.4229329526424408,
"learning_rate": 3.807486787805802e-05,
"loss": 1.5569,
"step": 3450
},
{
"epoch": 0.345,
"eval_loss": 1.6314581632614136,
"eval_runtime": 4.6341,
"eval_samples_per_second": 431.582,
"eval_steps_per_second": 26.974,
"step": 3450
},
{
"epoch": 0.346,
"grad_norm": 0.41742926836013794,
"learning_rate": 3.801979064992194e-05,
"loss": 1.5947,
"step": 3460
},
{
"epoch": 0.347,
"grad_norm": 0.3738185465335846,
"learning_rate": 3.796495174838033e-05,
"loss": 1.5678,
"step": 3470
},
{
"epoch": 0.348,
"grad_norm": 0.3990820348262787,
"learning_rate": 3.7910349459591094e-05,
"loss": 1.5846,
"step": 3480
},
{
"epoch": 0.349,
"grad_norm": 0.3688414394855499,
"learning_rate": 3.785598208691693e-05,
"loss": 1.5944,
"step": 3490
},
{
"epoch": 0.35,
"grad_norm": 0.4192740023136139,
"learning_rate": 3.780184795070394e-05,
"loss": 1.5938,
"step": 3500
},
{
"epoch": 0.35,
"eval_loss": 1.6295636892318726,
"eval_runtime": 4.6346,
"eval_samples_per_second": 431.535,
"eval_steps_per_second": 26.971,
"step": 3500
},
{
"epoch": 0.351,
"grad_norm": 0.3593446910381317,
"learning_rate": 3.7747945388063626e-05,
"loss": 1.5717,
"step": 3510
},
{
"epoch": 0.352,
"grad_norm": 0.4086902439594269,
"learning_rate": 3.7694272752658386e-05,
"loss": 1.5605,
"step": 3520
},
{
"epoch": 0.353,
"grad_norm": 0.3733591139316559,
"learning_rate": 3.764082841449032e-05,
"loss": 1.6089,
"step": 3530
},
{
"epoch": 0.354,
"grad_norm": 0.3657650947570801,
"learning_rate": 3.758761075969328e-05,
"loss": 1.5542,
"step": 3540
},
{
"epoch": 0.355,
"grad_norm": 0.3745919466018677,
"learning_rate": 3.7534618190328195e-05,
"loss": 1.5384,
"step": 3550
},
{
"epoch": 0.355,
"eval_loss": 1.6273561716079712,
"eval_runtime": 4.6393,
"eval_samples_per_second": 431.096,
"eval_steps_per_second": 26.943,
"step": 3550
},
{
"epoch": 0.356,
"grad_norm": 0.36409613490104675,
"learning_rate": 3.748184912418159e-05,
"loss": 1.5235,
"step": 3560
},
{
"epoch": 0.357,
"grad_norm": 0.4654470980167389,
"learning_rate": 3.742930199456709e-05,
"loss": 1.5894,
"step": 3570
},
{
"epoch": 0.358,
"grad_norm": 0.38284242153167725,
"learning_rate": 3.7376975250130133e-05,
"loss": 1.6203,
"step": 3580
},
{
"epoch": 0.359,
"grad_norm": 0.3441823422908783,
"learning_rate": 3.732486735465553e-05,
"loss": 1.5614,
"step": 3590
},
{
"epoch": 0.36,
"grad_norm": 0.36308762431144714,
"learning_rate": 3.727297678687811e-05,
"loss": 1.5292,
"step": 3600
},
{
"epoch": 0.36,
"eval_loss": 1.6230641603469849,
"eval_runtime": 4.6201,
"eval_samples_per_second": 432.889,
"eval_steps_per_second": 27.056,
"step": 3600
},
{
"epoch": 0.361,
"grad_norm": 0.3441646099090576,
"learning_rate": 3.7221302040296116e-05,
"loss": 1.5294,
"step": 3610
},
{
"epoch": 0.362,
"grad_norm": 0.3621971905231476,
"learning_rate": 3.7169841622987545e-05,
"loss": 1.5199,
"step": 3620
},
{
"epoch": 0.363,
"grad_norm": 0.375041127204895,
"learning_rate": 3.711859405742924e-05,
"loss": 1.5561,
"step": 3630
},
{
"epoch": 0.364,
"grad_norm": 0.4329853355884552,
"learning_rate": 3.706755788031864e-05,
"loss": 1.5937,
"step": 3640
},
{
"epoch": 0.365,
"grad_norm": 0.394687294960022,
"learning_rate": 3.701673164239839e-05,
"loss": 1.5165,
"step": 3650
},
{
"epoch": 0.365,
"eval_loss": 1.623963475227356,
"eval_runtime": 4.6333,
"eval_samples_per_second": 431.655,
"eval_steps_per_second": 26.978,
"step": 3650
},
{
"epoch": 0.366,
"grad_norm": 0.394584059715271,
"learning_rate": 3.696611390828343e-05,
"loss": 1.5693,
"step": 3660
},
{
"epoch": 0.367,
"grad_norm": 0.39215362071990967,
"learning_rate": 3.691570325629073e-05,
"loss": 1.5818,
"step": 3670
},
{
"epoch": 0.368,
"grad_norm": 0.3797973394393921,
"learning_rate": 3.6865498278271596e-05,
"loss": 1.5617,
"step": 3680
},
{
"epoch": 0.369,
"grad_norm": 0.39166536927223206,
"learning_rate": 3.681549757944646e-05,
"loss": 1.6029,
"step": 3690
},
{
"epoch": 0.37,
"grad_norm": 0.4009022116661072,
"learning_rate": 3.676569977824213e-05,
"loss": 1.584,
"step": 3700
},
{
"epoch": 0.37,
"eval_loss": 1.6209263801574707,
"eval_runtime": 4.6099,
"eval_samples_per_second": 433.849,
"eval_steps_per_second": 27.116,
"step": 3700
},
{
"epoch": 0.371,
"grad_norm": 0.36677446961402893,
"learning_rate": 3.6716103506131446e-05,
"loss": 1.5531,
"step": 3710
},
{
"epoch": 0.372,
"grad_norm": 0.42492184042930603,
"learning_rate": 3.6666707407475314e-05,
"loss": 1.5946,
"step": 3720
},
{
"epoch": 0.373,
"grad_norm": 0.41753894090652466,
"learning_rate": 3.661751013936708e-05,
"loss": 1.5885,
"step": 3730
},
{
"epoch": 0.374,
"grad_norm": 0.3767106235027313,
"learning_rate": 3.656851037147919e-05,
"loss": 1.6074,
"step": 3740
},
{
"epoch": 0.375,
"grad_norm": 0.46671557426452637,
"learning_rate": 3.65197067859121e-05,
"loss": 1.5997,
"step": 3750
},
{
"epoch": 0.375,
"eval_loss": 1.6205507516860962,
"eval_runtime": 4.6338,
"eval_samples_per_second": 431.614,
"eval_steps_per_second": 26.976,
"step": 3750
},
{
"epoch": 0.376,
"grad_norm": 0.40476033091545105,
"learning_rate": 3.6471098077045404e-05,
"loss": 1.5884,
"step": 3760
},
{
"epoch": 0.377,
"grad_norm": 0.4570123851299286,
"learning_rate": 3.642268295139107e-05,
"loss": 1.5792,
"step": 3770
},
{
"epoch": 0.378,
"grad_norm": 0.4356667101383209,
"learning_rate": 3.6374460127448995e-05,
"loss": 1.5642,
"step": 3780
},
{
"epoch": 0.379,
"grad_norm": 0.43170198798179626,
"learning_rate": 3.632642833556441e-05,
"loss": 1.5847,
"step": 3790
},
{
"epoch": 0.38,
"grad_norm": 0.5624451637268066,
"learning_rate": 3.627858631778756e-05,
"loss": 1.5819,
"step": 3800
},
{
"epoch": 0.38,
"eval_loss": 1.6164690256118774,
"eval_runtime": 4.6091,
"eval_samples_per_second": 433.925,
"eval_steps_per_second": 27.12,
"step": 3800
},
{
"epoch": 0.381,
"grad_norm": 0.41750478744506836,
"learning_rate": 3.623093282773527e-05,
"loss": 1.6101,
"step": 3810
},
{
"epoch": 0.382,
"grad_norm": 0.41956469416618347,
"learning_rate": 3.6183466630454636e-05,
"loss": 1.5962,
"step": 3820
},
{
"epoch": 0.383,
"grad_norm": 0.409078985452652,
"learning_rate": 3.6136186502288535e-05,
"loss": 1.5791,
"step": 3830
},
{
"epoch": 0.384,
"grad_norm": 0.3656834661960602,
"learning_rate": 3.608909123074314e-05,
"loss": 1.5996,
"step": 3840
},
{
"epoch": 0.385,
"grad_norm": 0.46695512533187866,
"learning_rate": 3.6042179614357375e-05,
"loss": 1.6295,
"step": 3850
},
{
"epoch": 0.385,
"eval_loss": 1.6181100606918335,
"eval_runtime": 4.6344,
"eval_samples_per_second": 431.56,
"eval_steps_per_second": 26.972,
"step": 3850
},
{
"epoch": 0.386,
"grad_norm": 0.4372875392436981,
"learning_rate": 3.5995450462574126e-05,
"loss": 1.5875,
"step": 3860
},
{
"epoch": 0.387,
"grad_norm": 0.4402835965156555,
"learning_rate": 3.594890259561335e-05,
"loss": 1.6344,
"step": 3870
},
{
"epoch": 0.388,
"grad_norm": 0.4220004677772522,
"learning_rate": 3.5902534844346976e-05,
"loss": 1.6147,
"step": 3880
},
{
"epoch": 0.389,
"grad_norm": 0.3918090760707855,
"learning_rate": 3.5856346050175565e-05,
"loss": 1.4967,
"step": 3890
},
{
"epoch": 0.39,
"grad_norm": 0.3966856598854065,
"learning_rate": 3.581033506490671e-05,
"loss": 1.5349,
"step": 3900
},
{
"epoch": 0.39,
"eval_loss": 1.6230711936950684,
"eval_runtime": 4.6195,
"eval_samples_per_second": 432.948,
"eval_steps_per_second": 27.059,
"step": 3900
},
{
"epoch": 0.391,
"grad_norm": 0.38468602299690247,
"learning_rate": 3.576450075063519e-05,
"loss": 1.5366,
"step": 3910
},
{
"epoch": 0.392,
"grad_norm": 0.39990097284317017,
"learning_rate": 3.571884197962469e-05,
"loss": 1.5028,
"step": 3920
},
{
"epoch": 0.393,
"grad_norm": 0.3719644248485565,
"learning_rate": 3.567335763419138e-05,
"loss": 1.5696,
"step": 3930
},
{
"epoch": 0.394,
"grad_norm": 0.40582171082496643,
"learning_rate": 3.562804660658888e-05,
"loss": 1.5883,
"step": 3940
},
{
"epoch": 0.395,
"grad_norm": 0.387210488319397,
"learning_rate": 3.5582907798895035e-05,
"loss": 1.561,
"step": 3950
},
{
"epoch": 0.395,
"eval_loss": 1.6173763275146484,
"eval_runtime": 4.6286,
"eval_samples_per_second": 432.1,
"eval_steps_per_second": 27.006,
"step": 3950
},
{
"epoch": 0.396,
"grad_norm": 0.39436179399490356,
"learning_rate": 3.55379401229001e-05,
"loss": 1.527,
"step": 3960
},
{
"epoch": 0.397,
"grad_norm": 0.37865695357322693,
"learning_rate": 3.5493142499996666e-05,
"loss": 1.5324,
"step": 3970
},
{
"epoch": 0.398,
"grad_norm": 0.3817419409751892,
"learning_rate": 3.544851386107085e-05,
"loss": 1.5975,
"step": 3980
},
{
"epoch": 0.399,
"grad_norm": 0.3955094814300537,
"learning_rate": 3.540405314639526e-05,
"loss": 1.5799,
"step": 3990
},
{
"epoch": 0.4,
"grad_norm": 0.432600200176239,
"learning_rate": 3.535975930552322e-05,
"loss": 1.5693,
"step": 4000
},
{
"epoch": 0.4,
"eval_loss": 1.6128498315811157,
"eval_runtime": 4.6082,
"eval_samples_per_second": 434.006,
"eval_steps_per_second": 27.125,
"step": 4000
},
{
"epoch": 0.401,
"grad_norm": 0.42284053564071655,
"learning_rate": 3.531563129718458e-05,
"loss": 1.5441,
"step": 4010
},
{
"epoch": 0.402,
"grad_norm": 0.36951157450675964,
"learning_rate": 3.527166808918287e-05,
"loss": 1.5692,
"step": 4020
},
{
"epoch": 0.403,
"grad_norm": 0.4210173785686493,
"learning_rate": 3.522786865829391e-05,
"loss": 1.5176,
"step": 4030
},
{
"epoch": 0.404,
"grad_norm": 0.491728276014328,
"learning_rate": 3.51842319901658e-05,
"loss": 1.56,
"step": 4040
},
{
"epoch": 0.405,
"grad_norm": 0.4052882492542267,
"learning_rate": 3.5140757079220216e-05,
"loss": 1.56,
"step": 4050
},
{
"epoch": 0.405,
"eval_loss": 1.6166870594024658,
"eval_runtime": 4.6429,
"eval_samples_per_second": 430.77,
"eval_steps_per_second": 26.923,
"step": 4050
},
{
"epoch": 0.406,
"grad_norm": 0.3819957375526428,
"learning_rate": 3.509744292855511e-05,
"loss": 1.5628,
"step": 4060
},
{
"epoch": 0.407,
"grad_norm": 0.4066322147846222,
"learning_rate": 3.505428854984869e-05,
"loss": 1.6145,
"step": 4070
},
{
"epoch": 0.408,
"grad_norm": 0.328678697347641,
"learning_rate": 3.5011292963264705e-05,
"loss": 1.6075,
"step": 4080
},
{
"epoch": 0.409,
"grad_norm": 0.3642025291919708,
"learning_rate": 3.496845519735901e-05,
"loss": 1.499,
"step": 4090
},
{
"epoch": 0.41,
"grad_norm": 0.40798667073249817,
"learning_rate": 3.492577428898734e-05,
"loss": 1.5397,
"step": 4100
},
{
"epoch": 0.41,
"eval_loss": 1.6112936735153198,
"eval_runtime": 4.6258,
"eval_samples_per_second": 432.359,
"eval_steps_per_second": 27.022,
"step": 4100
},
{
"epoch": 0.411,
"grad_norm": 0.40259620547294617,
"learning_rate": 3.4883249283214416e-05,
"loss": 1.606,
"step": 4110
},
{
"epoch": 0.412,
"grad_norm": 0.41489359736442566,
"learning_rate": 3.4840879233224196e-05,
"loss": 1.5389,
"step": 4120
},
{
"epoch": 0.413,
"grad_norm": 0.39256951212882996,
"learning_rate": 3.4798663200231265e-05,
"loss": 1.554,
"step": 4130
},
{
"epoch": 0.414,
"grad_norm": 0.3881060779094696,
"learning_rate": 3.475660025339355e-05,
"loss": 1.5654,
"step": 4140
},
{
"epoch": 0.415,
"grad_norm": 0.4276053011417389,
"learning_rate": 3.471468946972612e-05,
"loss": 1.5292,
"step": 4150
},
{
"epoch": 0.415,
"eval_loss": 1.6163493394851685,
"eval_runtime": 4.642,
"eval_samples_per_second": 430.846,
"eval_steps_per_second": 26.928,
"step": 4150
},
{
"epoch": 0.416,
"grad_norm": 0.38684210181236267,
"learning_rate": 3.467292993401603e-05,
"loss": 1.5643,
"step": 4160
},
{
"epoch": 0.417,
"grad_norm": 0.39144203066825867,
"learning_rate": 3.4631320738738494e-05,
"loss": 1.5399,
"step": 4170
},
{
"epoch": 0.418,
"grad_norm": 0.39135465025901794,
"learning_rate": 3.458986098397395e-05,
"loss": 1.5646,
"step": 4180
},
{
"epoch": 0.419,
"grad_norm": 0.44311603903770447,
"learning_rate": 3.45485497773264e-05,
"loss": 1.5739,
"step": 4190
},
{
"epoch": 0.42,
"grad_norm": 0.45056262612342834,
"learning_rate": 3.450738623384265e-05,
"loss": 1.6048,
"step": 4200
},
{
"epoch": 0.42,
"eval_loss": 1.6106904745101929,
"eval_runtime": 4.616,
"eval_samples_per_second": 433.279,
"eval_steps_per_second": 27.08,
"step": 4200
},
{
"epoch": 0.421,
"grad_norm": 0.37200435996055603,
"learning_rate": 3.4466369475932744e-05,
"loss": 1.5346,
"step": 4210
},
{
"epoch": 0.422,
"grad_norm": 0.3999169170856476,
"learning_rate": 3.442549863329138e-05,
"loss": 1.5593,
"step": 4220
},
{
"epoch": 0.423,
"grad_norm": 0.38607126474380493,
"learning_rate": 3.438477284282031e-05,
"loss": 1.557,
"step": 4230
},
{
"epoch": 0.424,
"grad_norm": 0.4031635820865631,
"learning_rate": 3.4344191248551814e-05,
"loss": 1.5875,
"step": 4240
},
{
"epoch": 0.425,
"grad_norm": 0.37006083130836487,
"learning_rate": 3.4303753001573164e-05,
"loss": 1.576,
"step": 4250
},
{
"epoch": 0.425,
"eval_loss": 1.612293004989624,
"eval_runtime": 4.6366,
"eval_samples_per_second": 431.347,
"eval_steps_per_second": 26.959,
"step": 4250
},
{
"epoch": 0.426,
"grad_norm": 0.37665873765945435,
"learning_rate": 3.426345725995197e-05,
"loss": 1.5487,
"step": 4260
},
{
"epoch": 0.427,
"grad_norm": 0.4067043662071228,
"learning_rate": 3.422330318866262e-05,
"loss": 1.5885,
"step": 4270
},
{
"epoch": 0.428,
"grad_norm": 0.37107494473457336,
"learning_rate": 3.4183289959513575e-05,
"loss": 1.5542,
"step": 4280
},
{
"epoch": 0.429,
"grad_norm": 0.35898610949516296,
"learning_rate": 3.414341675107563e-05,
"loss": 1.5508,
"step": 4290
},
{
"epoch": 0.43,
"grad_norm": 0.3901163339614868,
"learning_rate": 3.4103682748611054e-05,
"loss": 1.6198,
"step": 4300
},
{
"epoch": 0.43,
"eval_loss": 1.6109461784362793,
"eval_runtime": 4.6346,
"eval_samples_per_second": 431.536,
"eval_steps_per_second": 26.971,
"step": 4300
},
{
"epoch": 0.431,
"grad_norm": 0.41391491889953613,
"learning_rate": 3.4064087144003755e-05,
"loss": 1.5761,
"step": 4310
},
{
"epoch": 0.432,
"grad_norm": 0.4450955390930176,
"learning_rate": 3.402462913569015e-05,
"loss": 1.5615,
"step": 4320
},
{
"epoch": 0.433,
"grad_norm": 0.44352859258651733,
"learning_rate": 3.3985307928591074e-05,
"loss": 1.5776,
"step": 4330
},
{
"epoch": 0.434,
"grad_norm": 0.3886057436466217,
"learning_rate": 3.3946122734044455e-05,
"loss": 1.5866,
"step": 4340
},
{
"epoch": 0.435,
"grad_norm": 0.36949875950813293,
"learning_rate": 3.390707276973892e-05,
"loss": 1.5043,
"step": 4350
},
{
"epoch": 0.435,
"eval_loss": 1.6040953397750854,
"eval_runtime": 4.651,
"eval_samples_per_second": 430.016,
"eval_steps_per_second": 26.876,
"step": 4350
},
{
"epoch": 0.436,
"grad_norm": 0.37118640542030334,
"learning_rate": 3.3868157259648156e-05,
"loss": 1.55,
"step": 4360
},
{
"epoch": 0.437,
"grad_norm": 0.3654156029224396,
"learning_rate": 3.382937543396614e-05,
"loss": 1.5805,
"step": 4370
},
{
"epoch": 0.438,
"grad_norm": 0.36356887221336365,
"learning_rate": 3.379072652904321e-05,
"loss": 1.5733,
"step": 4380
},
{
"epoch": 0.439,
"grad_norm": 0.3553885519504547,
"learning_rate": 3.375220978732289e-05,
"loss": 1.5236,
"step": 4390
},
{
"epoch": 0.44,
"grad_norm": 0.33554360270500183,
"learning_rate": 3.371382445727951e-05,
"loss": 1.4966,
"step": 4400
},
{
"epoch": 0.44,
"eval_loss": 1.6084920167922974,
"eval_runtime": 4.6233,
"eval_samples_per_second": 432.591,
"eval_steps_per_second": 27.037,
"step": 4400
},
{
"epoch": 0.441,
"grad_norm": 0.5069860816001892,
"learning_rate": 3.3675569793356654e-05,
"loss": 1.5297,
"step": 4410
},
{
"epoch": 0.442,
"grad_norm": 0.4153578579425812,
"learning_rate": 3.36374450559063e-05,
"loss": 1.6225,
"step": 4420
},
{
"epoch": 0.443,
"grad_norm": 0.4991408884525299,
"learning_rate": 3.359944951112878e-05,
"loss": 1.5769,
"step": 4430
},
{
"epoch": 0.444,
"grad_norm": 0.38914990425109863,
"learning_rate": 3.356158243101345e-05,
"loss": 1.5169,
"step": 4440
},
{
"epoch": 0.445,
"grad_norm": 0.38475480675697327,
"learning_rate": 3.3523843093280096e-05,
"loss": 1.532,
"step": 4450
},
{
"epoch": 0.445,
"eval_loss": 1.6051650047302246,
"eval_runtime": 4.6208,
"eval_samples_per_second": 432.824,
"eval_steps_per_second": 27.051,
"step": 4450
},
{
"epoch": 0.446,
"grad_norm": 0.3508313000202179,
"learning_rate": 3.348623078132111e-05,
"loss": 1.5924,
"step": 4460
},
{
"epoch": 0.447,
"grad_norm": 0.3878994286060333,
"learning_rate": 3.344874478414426e-05,
"loss": 1.5773,
"step": 4470
},
{
"epoch": 0.448,
"grad_norm": 0.39611005783081055,
"learning_rate": 3.341138439631633e-05,
"loss": 1.5787,
"step": 4480
},
{
"epoch": 0.449,
"grad_norm": 0.37631756067276,
"learning_rate": 3.337414891790731e-05,
"loss": 1.4992,
"step": 4490
},
{
"epoch": 0.45,
"grad_norm": 0.35049423575401306,
"learning_rate": 3.3337037654435325e-05,
"loss": 1.5761,
"step": 4500
},
{
"epoch": 0.45,
"eval_loss": 1.6108379364013672,
"eval_runtime": 4.6124,
"eval_samples_per_second": 433.615,
"eval_steps_per_second": 27.101,
"step": 4500
},
{
"epoch": 0.451,
"grad_norm": 0.41320350766181946,
"learning_rate": 3.330004991681224e-05,
"loss": 1.5535,
"step": 4510
},
{
"epoch": 0.452,
"grad_norm": 0.40881311893463135,
"learning_rate": 3.3263185021289925e-05,
"loss": 1.5806,
"step": 4520
},
{
"epoch": 0.453,
"grad_norm": 0.3850957453250885,
"learning_rate": 3.322644228940717e-05,
"loss": 1.585,
"step": 4530
},
{
"epoch": 0.454,
"grad_norm": 0.36344847083091736,
"learning_rate": 3.318982104793721e-05,
"loss": 1.5393,
"step": 4540
},
{
"epoch": 0.455,
"grad_norm": 0.41180986166000366,
"learning_rate": 3.3153320628835953e-05,
"loss": 1.5423,
"step": 4550
},
{
"epoch": 0.455,
"eval_loss": 1.6033803224563599,
"eval_runtime": 4.6294,
"eval_samples_per_second": 432.018,
"eval_steps_per_second": 27.001,
"step": 4550
},
{
"epoch": 0.456,
"grad_norm": 0.3614816963672638,
"learning_rate": 3.311694036919079e-05,
"loss": 1.4995,
"step": 4560
},
{
"epoch": 0.457,
"grad_norm": 0.416110098361969,
"learning_rate": 3.308067961117001e-05,
"loss": 1.4939,
"step": 4570
},
{
"epoch": 0.458,
"grad_norm": 0.3330053389072418,
"learning_rate": 3.3044537701972836e-05,
"loss": 1.5118,
"step": 4580
},
{
"epoch": 0.459,
"grad_norm": 0.35887932777404785,
"learning_rate": 3.30085139937801e-05,
"loss": 1.5084,
"step": 4590
},
{
"epoch": 0.46,
"grad_norm": 0.33625465631484985,
"learning_rate": 3.2972607843705445e-05,
"loss": 1.5614,
"step": 4600
},
{
"epoch": 0.46,
"eval_loss": 1.6055898666381836,
"eval_runtime": 4.6102,
"eval_samples_per_second": 433.823,
"eval_steps_per_second": 27.114,
"step": 4600
},
{
"epoch": 0.461,
"grad_norm": 0.4302857518196106,
"learning_rate": 3.293681861374713e-05,
"loss": 1.459,
"step": 4610
},
{
"epoch": 0.462,
"grad_norm": 0.37634381651878357,
"learning_rate": 3.290114567074045e-05,
"loss": 1.513,
"step": 4620
},
{
"epoch": 0.463,
"grad_norm": 0.3611469268798828,
"learning_rate": 3.2865588386310646e-05,
"loss": 1.5474,
"step": 4630
},
{
"epoch": 0.464,
"grad_norm": 0.34918755292892456,
"learning_rate": 3.2830146136826445e-05,
"loss": 1.5675,
"step": 4640
},
{
"epoch": 0.465,
"grad_norm": 0.3509509563446045,
"learning_rate": 3.2794818303354126e-05,
"loss": 1.5691,
"step": 4650
},
{
"epoch": 0.465,
"eval_loss": 1.602272391319275,
"eval_runtime": 4.6268,
"eval_samples_per_second": 432.262,
"eval_steps_per_second": 27.016,
"step": 4650
},
{
"epoch": 0.466,
"grad_norm": 0.3535073399543762,
"learning_rate": 3.2759604271612094e-05,
"loss": 1.5149,
"step": 4660
},
{
"epoch": 0.467,
"grad_norm": 0.36410200595855713,
"learning_rate": 3.272450343192603e-05,
"loss": 1.5849,
"step": 4670
},
{
"epoch": 0.468,
"grad_norm": 0.34112101793289185,
"learning_rate": 3.2689515179184576e-05,
"loss": 1.5669,
"step": 4680
},
{
"epoch": 0.469,
"grad_norm": 0.352558970451355,
"learning_rate": 3.265463891279551e-05,
"loss": 1.4972,
"step": 4690
},
{
"epoch": 0.47,
"grad_norm": 0.4231419563293457,
"learning_rate": 3.2619874036642426e-05,
"loss": 1.5375,
"step": 4700
},
{
"epoch": 0.47,
"eval_loss": 1.6014271974563599,
"eval_runtime": 4.6266,
"eval_samples_per_second": 432.286,
"eval_steps_per_second": 27.018,
"step": 4700
},
{
"epoch": 0.471,
"grad_norm": 0.4738163352012634,
"learning_rate": 3.258521995904196e-05,
"loss": 1.5724,
"step": 4710
},
{
"epoch": 0.472,
"grad_norm": 0.489130437374115,
"learning_rate": 3.2550676092701496e-05,
"loss": 1.5234,
"step": 4720
},
{
"epoch": 0.473,
"grad_norm": 0.40578359365463257,
"learning_rate": 3.2516241854677356e-05,
"loss": 1.4739,
"step": 4730
},
{
"epoch": 0.474,
"grad_norm": 0.3754052221775055,
"learning_rate": 3.248191666633348e-05,
"loss": 1.5517,
"step": 4740
},
{
"epoch": 0.475,
"grad_norm": 0.4701234996318817,
"learning_rate": 3.244769995330059e-05,
"loss": 1.557,
"step": 4750
},
{
"epoch": 0.475,
"eval_loss": 1.6014772653579712,
"eval_runtime": 4.6244,
"eval_samples_per_second": 432.488,
"eval_steps_per_second": 27.03,
"step": 4750
},
{
"epoch": 0.476,
"grad_norm": 0.37672650814056396,
"learning_rate": 3.241359114543582e-05,
"loss": 1.5531,
"step": 4760
},
{
"epoch": 0.477,
"grad_norm": 0.40195539593696594,
"learning_rate": 3.237958967678283e-05,
"loss": 1.5759,
"step": 4770
},
{
"epoch": 0.478,
"grad_norm": 0.3967335522174835,
"learning_rate": 3.234569498553233e-05,
"loss": 1.5871,
"step": 4780
},
{
"epoch": 0.479,
"grad_norm": 0.3859959840774536,
"learning_rate": 3.231190651398314e-05,
"loss": 1.5716,
"step": 4790
},
{
"epoch": 0.48,
"grad_norm": 0.42818543314933777,
"learning_rate": 3.227822370850359e-05,
"loss": 1.5402,
"step": 4800
},
{
"epoch": 0.48,
"eval_loss": 1.5974245071411133,
"eval_runtime": 4.6278,
"eval_samples_per_second": 432.172,
"eval_steps_per_second": 27.011,
"step": 4800
},
{
"epoch": 0.481,
"grad_norm": 0.4882192015647888,
"learning_rate": 3.224464601949349e-05,
"loss": 1.599,
"step": 4810
},
{
"epoch": 0.482,
"grad_norm": 0.4261423647403717,
"learning_rate": 3.2211172901346385e-05,
"loss": 1.5992,
"step": 4820
},
{
"epoch": 0.483,
"grad_norm": 0.3666815459728241,
"learning_rate": 3.21778038124124e-05,
"loss": 1.595,
"step": 4830
},
{
"epoch": 0.484,
"grad_norm": 0.35577964782714844,
"learning_rate": 3.2144538214961344e-05,
"loss": 1.5837,
"step": 4840
},
{
"epoch": 0.485,
"grad_norm": 0.3705880045890808,
"learning_rate": 3.21113755751464e-05,
"loss": 1.5842,
"step": 4850
},
{
"epoch": 0.485,
"eval_loss": 1.6001406908035278,
"eval_runtime": 4.6403,
"eval_samples_per_second": 431.008,
"eval_steps_per_second": 26.938,
"step": 4850
},
{
"epoch": 0.486,
"grad_norm": 0.4082805812358856,
"learning_rate": 3.207831536296808e-05,
"loss": 1.5471,
"step": 4860
},
{
"epoch": 0.487,
"grad_norm": 0.41893184185028076,
"learning_rate": 3.2045357052238676e-05,
"loss": 1.5404,
"step": 4870
},
{
"epoch": 0.488,
"grad_norm": 0.4093189537525177,
"learning_rate": 3.201250012054707e-05,
"loss": 1.5889,
"step": 4880
},
{
"epoch": 0.489,
"grad_norm": 0.4183659553527832,
"learning_rate": 3.197974404922397e-05,
"loss": 1.5439,
"step": 4890
},
{
"epoch": 0.49,
"grad_norm": 0.4108491837978363,
"learning_rate": 3.194708832330752e-05,
"loss": 1.5454,
"step": 4900
},
{
"epoch": 0.49,
"eval_loss": 1.596949577331543,
"eval_runtime": 4.612,
"eval_samples_per_second": 433.65,
"eval_steps_per_second": 27.103,
"step": 4900
},
{
"epoch": 0.491,
"grad_norm": 0.44454118609428406,
"learning_rate": 3.191453243150929e-05,
"loss": 1.5632,
"step": 4910
},
{
"epoch": 0.492,
"grad_norm": 0.3570930063724518,
"learning_rate": 3.188207586618064e-05,
"loss": 1.4952,
"step": 4920
},
{
"epoch": 0.493,
"grad_norm": 0.377950519323349,
"learning_rate": 3.1849718123279517e-05,
"loss": 1.5151,
"step": 4930
},
{
"epoch": 0.494,
"grad_norm": 0.38532108068466187,
"learning_rate": 3.181745870233753e-05,
"loss": 1.5811,
"step": 4940
},
{
"epoch": 0.495,
"grad_norm": 0.3370131850242615,
"learning_rate": 3.178529710642749e-05,
"loss": 1.5415,
"step": 4950
},
{
"epoch": 0.495,
"eval_loss": 1.5937820672988892,
"eval_runtime": 4.6236,
"eval_samples_per_second": 432.56,
"eval_steps_per_second": 27.035,
"step": 4950
},
{
"epoch": 0.496,
"grad_norm": 0.43014732003211975,
"learning_rate": 3.175323284213118e-05,
"loss": 1.5406,
"step": 4960
},
{
"epoch": 0.497,
"grad_norm": 0.4400146007537842,
"learning_rate": 3.172126541950766e-05,
"loss": 1.5578,
"step": 4970
},
{
"epoch": 0.498,
"grad_norm": 0.41548582911491394,
"learning_rate": 3.1689394352061735e-05,
"loss": 1.5549,
"step": 4980
},
{
"epoch": 0.499,
"grad_norm": 0.37343522906303406,
"learning_rate": 3.165761915671293e-05,
"loss": 1.5127,
"step": 4990
},
{
"epoch": 0.5,
"grad_norm": 0.3881683051586151,
"learning_rate": 3.162593935376469e-05,
"loss": 1.5553,
"step": 5000
},
{
"epoch": 0.5,
"eval_loss": 1.6019805669784546,
"eval_runtime": 4.6105,
"eval_samples_per_second": 433.793,
"eval_steps_per_second": 27.112,
"step": 5000
},
{
"epoch": 0.501,
"grad_norm": 0.35221096873283386,
"learning_rate": 3.159435446687396e-05,
"loss": 1.5544,
"step": 5010
},
{
"epoch": 0.502,
"grad_norm": 0.33756914734840393,
"learning_rate": 3.1562864023021174e-05,
"loss": 1.5108,
"step": 5020
},
{
"epoch": 0.503,
"grad_norm": 0.340436726808548,
"learning_rate": 3.1531467552480395e-05,
"loss": 1.4998,
"step": 5030
},
{
"epoch": 0.504,
"grad_norm": 0.3652532696723938,
"learning_rate": 3.1500164588789964e-05,
"loss": 1.5853,
"step": 5040
},
{
"epoch": 0.505,
"grad_norm": 0.37459462881088257,
"learning_rate": 3.146895466872338e-05,
"loss": 1.5595,
"step": 5050
},
{
"epoch": 0.505,
"eval_loss": 1.5959675312042236,
"eval_runtime": 4.6328,
"eval_samples_per_second": 431.705,
"eval_steps_per_second": 26.982,
"step": 5050
},
{
"epoch": 0.506,
"grad_norm": 0.4339495599269867,
"learning_rate": 3.1437837332260465e-05,
"loss": 1.5392,
"step": 5060
},
{
"epoch": 0.507,
"grad_norm": 0.4025578796863556,
"learning_rate": 3.140681212255896e-05,
"loss": 1.499,
"step": 5070
},
{
"epoch": 0.508,
"grad_norm": 0.38840150833129883,
"learning_rate": 3.137587858592628e-05,
"loss": 1.5285,
"step": 5080
},
{
"epoch": 0.509,
"grad_norm": 0.34794872999191284,
"learning_rate": 3.13450362717917e-05,
"loss": 1.5384,
"step": 5090
},
{
"epoch": 0.51,
"grad_norm": 0.3731895089149475,
"learning_rate": 3.131428473267876e-05,
"loss": 1.5532,
"step": 5100
},
{
"epoch": 0.51,
"eval_loss": 1.591291904449463,
"eval_runtime": 4.625,
"eval_samples_per_second": 432.432,
"eval_steps_per_second": 27.027,
"step": 5100
}
],
"logging_steps": 10,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.3645715079168e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}