{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 2560,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0390625,
"grad_norm": 8.492419242858887,
"learning_rate": 7.8125e-06,
"loss": 1.3342,
"step": 10
},
{
"epoch": 0.078125,
"grad_norm": 3.8445992469787598,
"learning_rate": 1.5625e-05,
"loss": 1.0352,
"step": 20
},
{
"epoch": 0.1171875,
"grad_norm": 2.211190700531006,
"learning_rate": 2.34375e-05,
"loss": 0.4873,
"step": 30
},
{
"epoch": 0.15625,
"grad_norm": 1.9646821022033691,
"learning_rate": 3.125e-05,
"loss": 0.3898,
"step": 40
},
{
"epoch": 0.1953125,
"grad_norm": 1.9914284944534302,
"learning_rate": 3.90625e-05,
"loss": 0.2554,
"step": 50
},
{
"epoch": 0.234375,
"grad_norm": 0.9096956253051758,
"learning_rate": 4.6875e-05,
"loss": 0.1924,
"step": 60
},
{
"epoch": 0.2734375,
"grad_norm": 0.6492406129837036,
"learning_rate": 5.46875e-05,
"loss": 0.1721,
"step": 70
},
{
"epoch": 0.3125,
"grad_norm": 0.8035663962364197,
"learning_rate": 6.25e-05,
"loss": 0.1454,
"step": 80
},
{
"epoch": 0.3515625,
"grad_norm": 0.5276547074317932,
"learning_rate": 7.031250000000001e-05,
"loss": 0.1343,
"step": 90
},
{
"epoch": 0.390625,
"grad_norm": 0.735613226890564,
"learning_rate": 7.8125e-05,
"loss": 0.1318,
"step": 100
},
{
"epoch": 0.4296875,
"grad_norm": 1.0286799669265747,
"learning_rate": 8.593750000000001e-05,
"loss": 0.111,
"step": 110
},
{
"epoch": 0.46875,
"grad_norm": 0.6054307222366333,
"learning_rate": 9.375e-05,
"loss": 0.1102,
"step": 120
},
{
"epoch": 0.5078125,
"grad_norm": 0.6528738737106323,
"learning_rate": 9.999983313226824e-05,
"loss": 0.1024,
"step": 130
},
{
"epoch": 0.546875,
"grad_norm": 0.522591769695282,
"learning_rate": 9.999399287860388e-05,
"loss": 0.0946,
"step": 140
},
{
"epoch": 0.5859375,
"grad_norm": 0.7753025889396667,
"learning_rate": 9.997981035211134e-05,
"loss": 0.093,
"step": 150
},
{
"epoch": 0.625,
"grad_norm": 0.58796226978302,
"learning_rate": 9.995728791936504e-05,
"loss": 0.0881,
"step": 160
},
{
"epoch": 0.6640625,
"grad_norm": 0.5107079744338989,
"learning_rate": 9.992642933858212e-05,
"loss": 0.0807,
"step": 170
},
{
"epoch": 0.703125,
"grad_norm": 0.9606569409370422,
"learning_rate": 9.988723975899517e-05,
"loss": 0.0991,
"step": 180
},
{
"epoch": 0.7421875,
"grad_norm": 0.7940914034843445,
"learning_rate": 9.983972571999318e-05,
"loss": 0.0858,
"step": 190
},
{
"epoch": 0.78125,
"grad_norm": 0.805740237236023,
"learning_rate": 9.97838951500302e-05,
"loss": 0.0975,
"step": 200
},
{
"epoch": 0.8203125,
"grad_norm": 0.8114387392997742,
"learning_rate": 9.971975736530243e-05,
"loss": 0.0815,
"step": 210
},
{
"epoch": 0.859375,
"grad_norm": 0.5807178616523743,
"learning_rate": 9.96473230681937e-05,
"loss": 0.0768,
"step": 220
},
{
"epoch": 0.8984375,
"grad_norm": 0.655323326587677,
"learning_rate": 9.956660434548948e-05,
"loss": 0.0858,
"step": 230
},
{
"epoch": 0.9375,
"grad_norm": 0.513111412525177,
"learning_rate": 9.947761466636014e-05,
"loss": 0.0816,
"step": 240
},
{
"epoch": 0.9765625,
"grad_norm": 0.7743618488311768,
"learning_rate": 9.938036888011337e-05,
"loss": 0.071,
"step": 250
},
{
"epoch": 1.015625,
"grad_norm": 0.45275968313217163,
"learning_rate": 9.92748832137163e-05,
"loss": 0.073,
"step": 260
},
{
"epoch": 1.0546875,
"grad_norm": 0.7138253450393677,
"learning_rate": 9.916117526908785e-05,
"loss": 0.0774,
"step": 270
},
{
"epoch": 1.09375,
"grad_norm": 0.8007267117500305,
"learning_rate": 9.903926402016153e-05,
"loss": 0.0721,
"step": 280
},
{
"epoch": 1.1328125,
"grad_norm": 0.6417205333709717,
"learning_rate": 9.890916980971936e-05,
"loss": 0.0692,
"step": 290
},
{
"epoch": 1.171875,
"grad_norm": 0.605466902256012,
"learning_rate": 9.877091434599734e-05,
"loss": 0.0728,
"step": 300
},
{
"epoch": 1.2109375,
"grad_norm": 0.6525840759277344,
"learning_rate": 9.862452069906317e-05,
"loss": 0.0758,
"step": 310
},
{
"epoch": 1.25,
"grad_norm": 1.1812381744384766,
"learning_rate": 9.847001329696653e-05,
"loss": 0.0707,
"step": 320
},
{
"epoch": 1.2890625,
"grad_norm": 0.7314594388008118,
"learning_rate": 9.830741792166295e-05,
"loss": 0.0693,
"step": 330
},
{
"epoch": 1.328125,
"grad_norm": 0.721834659576416,
"learning_rate": 9.813676170471177e-05,
"loss": 0.0702,
"step": 340
},
{
"epoch": 1.3671875,
"grad_norm": 0.5326610207557678,
"learning_rate": 9.795807312274862e-05,
"loss": 0.0684,
"step": 350
},
{
"epoch": 1.40625,
"grad_norm": 0.6198597550392151,
"learning_rate": 9.777138199273384e-05,
"loss": 0.0675,
"step": 360
},
{
"epoch": 1.4453125,
"grad_norm": 0.5808058977127075,
"learning_rate": 9.757671946697698e-05,
"loss": 0.0665,
"step": 370
},
{
"epoch": 1.484375,
"grad_norm": 0.4104309678077698,
"learning_rate": 9.737411802793852e-05,
"loss": 0.0676,
"step": 380
},
{
"epoch": 1.5234375,
"grad_norm": 0.618438720703125,
"learning_rate": 9.716361148280977e-05,
"loss": 0.0615,
"step": 390
},
{
"epoch": 1.5625,
"grad_norm": 0.4504268765449524,
"learning_rate": 9.694523495787149e-05,
"loss": 0.0721,
"step": 400
},
{
"epoch": 1.6015625,
"grad_norm": 0.333695650100708,
"learning_rate": 9.671902489263261e-05,
"loss": 0.0579,
"step": 410
},
{
"epoch": 1.640625,
"grad_norm": 0.7360309362411499,
"learning_rate": 9.648501903374975e-05,
"loss": 0.0651,
"step": 420
},
{
"epoch": 1.6796875,
"grad_norm": 0.4267025291919708,
"learning_rate": 9.624325642872853e-05,
"loss": 0.057,
"step": 430
},
{
"epoch": 1.71875,
"grad_norm": 0.3154551684856415,
"learning_rate": 9.599377741940792e-05,
"loss": 0.0696,
"step": 440
},
{
"epoch": 1.7578125,
"grad_norm": 0.4576047658920288,
"learning_rate": 9.57366236352286e-05,
"loss": 0.0595,
"step": 450
},
{
"epoch": 1.796875,
"grad_norm": 0.5197241902351379,
"learning_rate": 9.547183798628641e-05,
"loss": 0.0661,
"step": 460
},
{
"epoch": 1.8359375,
"grad_norm": 0.42932412028312683,
"learning_rate": 9.519946465617218e-05,
"loss": 0.0609,
"step": 470
},
{
"epoch": 1.875,
"grad_norm": 0.4223524034023285,
"learning_rate": 9.491954909459895e-05,
"loss": 0.0574,
"step": 480
},
{
"epoch": 1.9140625,
"grad_norm": 0.47859442234039307,
"learning_rate": 9.463213800981805e-05,
"loss": 0.0601,
"step": 490
},
{
"epoch": 1.953125,
"grad_norm": 0.31995102763175964,
"learning_rate": 9.433727936082504e-05,
"loss": 0.0555,
"step": 500
},
{
"epoch": 1.9921875,
"grad_norm": 0.3553142547607422,
"learning_rate": 9.403502234935706e-05,
"loss": 0.0582,
"step": 510
},
{
"epoch": 2.03125,
"grad_norm": 0.6797146201133728,
"learning_rate": 9.372541741168272e-05,
"loss": 0.0528,
"step": 520
},
{
"epoch": 2.0703125,
"grad_norm": 0.41948041319847107,
"learning_rate": 9.340851621018603e-05,
"loss": 0.0598,
"step": 530
},
{
"epoch": 2.109375,
"grad_norm": 0.5185118317604065,
"learning_rate": 9.308437162474579e-05,
"loss": 0.0573,
"step": 540
},
{
"epoch": 2.1484375,
"grad_norm": 0.5795381665229797,
"learning_rate": 9.275303774391165e-05,
"loss": 0.0582,
"step": 550
},
{
"epoch": 2.1875,
"grad_norm": 0.44867536425590515,
"learning_rate": 9.241456985587868e-05,
"loss": 0.0616,
"step": 560
},
{
"epoch": 2.2265625,
"grad_norm": 0.42551538348197937,
"learning_rate": 9.20690244392617e-05,
"loss": 0.0578,
"step": 570
},
{
"epoch": 2.265625,
"grad_norm": 0.6047021746635437,
"learning_rate": 9.171645915367088e-05,
"loss": 0.0523,
"step": 580
},
{
"epoch": 2.3046875,
"grad_norm": 0.5272088050842285,
"learning_rate": 9.135693283009035e-05,
"loss": 0.0648,
"step": 590
},
{
"epoch": 2.34375,
"grad_norm": 0.396763414144516,
"learning_rate": 9.099050546106133e-05,
"loss": 0.0543,
"step": 600
},
{
"epoch": 2.3828125,
"grad_norm": 0.4244937300682068,
"learning_rate": 9.061723819067154e-05,
"loss": 0.0506,
"step": 610
},
{
"epoch": 2.421875,
"grad_norm": 0.37396669387817383,
"learning_rate": 9.023719330435223e-05,
"loss": 0.0566,
"step": 620
},
{
"epoch": 2.4609375,
"grad_norm": 0.3504962623119354,
"learning_rate": 8.9850434218485e-05,
"loss": 0.0586,
"step": 630
},
{
"epoch": 2.5,
"grad_norm": 0.5627210736274719,
"learning_rate": 8.945702546981969e-05,
"loss": 0.053,
"step": 640
},
{
"epoch": 2.5390625,
"grad_norm": 0.6733695268630981,
"learning_rate": 8.905703270470549e-05,
"loss": 0.0566,
"step": 650
},
{
"epoch": 2.578125,
"grad_norm": 0.28862297534942627,
"learning_rate": 8.865052266813685e-05,
"loss": 0.0484,
"step": 660
},
{
"epoch": 2.6171875,
"grad_norm": 0.9207034111022949,
"learning_rate": 8.823756319261597e-05,
"loss": 0.0556,
"step": 670
},
{
"epoch": 2.65625,
"grad_norm": 0.5370097756385803,
"learning_rate": 8.781822318683394e-05,
"loss": 0.0533,
"step": 680
},
{
"epoch": 2.6953125,
"grad_norm": 0.3493472635746002,
"learning_rate": 8.739257262417225e-05,
"loss": 0.0539,
"step": 690
},
{
"epoch": 2.734375,
"grad_norm": 0.36126288771629333,
"learning_rate": 8.696068253102665e-05,
"loss": 0.0557,
"step": 700
},
{
"epoch": 2.7734375,
"grad_norm": 0.331190824508667,
"learning_rate": 8.652262497495527e-05,
"loss": 0.0514,
"step": 710
},
{
"epoch": 2.8125,
"grad_norm": 0.4170578420162201,
"learning_rate": 8.60784730526531e-05,
"loss": 0.0469,
"step": 720
},
{
"epoch": 2.8515625,
"grad_norm": 0.35276809334754944,
"learning_rate": 8.56283008777546e-05,
"loss": 0.0541,
"step": 730
},
{
"epoch": 2.890625,
"grad_norm": 0.5037612318992615,
"learning_rate": 8.517218356846665e-05,
"loss": 0.0493,
"step": 740
},
{
"epoch": 2.9296875,
"grad_norm": 0.3002952039241791,
"learning_rate": 8.471019723503411e-05,
"loss": 0.0526,
"step": 750
},
{
"epoch": 2.96875,
"grad_norm": 0.33518242835998535,
"learning_rate": 8.424241896703936e-05,
"loss": 0.0522,
"step": 760
},
{
"epoch": 3.0078125,
"grad_norm": 0.4655015766620636,
"learning_rate": 8.376892682053898e-05,
"loss": 0.0504,
"step": 770
},
{
"epoch": 3.046875,
"grad_norm": 0.537617564201355,
"learning_rate": 8.328979980503873e-05,
"loss": 0.0543,
"step": 780
},
{
"epoch": 3.0859375,
"grad_norm": 0.38395699858665466,
"learning_rate": 8.280511787030963e-05,
"loss": 0.0534,
"step": 790
},
{
"epoch": 3.125,
"grad_norm": 0.44339320063591003,
"learning_rate": 8.231496189304704e-05,
"loss": 0.0474,
"step": 800
},
{
"epoch": 3.1640625,
"grad_norm": 0.49106666445732117,
"learning_rate": 8.18194136633753e-05,
"loss": 0.0523,
"step": 810
},
{
"epoch": 3.203125,
"grad_norm": 0.3647231161594391,
"learning_rate": 8.131855587119957e-05,
"loss": 0.0515,
"step": 820
},
{
"epoch": 3.2421875,
"grad_norm": 0.43639010190963745,
"learning_rate": 8.081247209240788e-05,
"loss": 0.0503,
"step": 830
},
{
"epoch": 3.28125,
"grad_norm": 0.3658983111381531,
"learning_rate": 8.030124677492512e-05,
"loss": 0.053,
"step": 840
},
{
"epoch": 3.3203125,
"grad_norm": 0.23427674174308777,
"learning_rate": 7.978496522462167e-05,
"loss": 0.0497,
"step": 850
},
{
"epoch": 3.359375,
"grad_norm": 0.3150970935821533,
"learning_rate": 7.926371359107871e-05,
"loss": 0.0468,
"step": 860
},
{
"epoch": 3.3984375,
"grad_norm": 0.4161214828491211,
"learning_rate": 7.873757885321291e-05,
"loss": 0.0488,
"step": 870
},
{
"epoch": 3.4375,
"grad_norm": 0.27835017442703247,
"learning_rate": 7.820664880476256e-05,
"loss": 0.0449,
"step": 880
},
{
"epoch": 3.4765625,
"grad_norm": 0.31080129742622375,
"learning_rate": 7.767101203963786e-05,
"loss": 0.0459,
"step": 890
},
{
"epoch": 3.515625,
"grad_norm": 0.32351627945899963,
"learning_rate": 7.713075793713774e-05,
"loss": 0.0414,
"step": 900
},
{
"epoch": 3.5546875,
"grad_norm": 0.4844232499599457,
"learning_rate": 7.658597664703533e-05,
"loss": 0.0456,
"step": 910
},
{
"epoch": 3.59375,
"grad_norm": 0.49269959330558777,
"learning_rate": 7.603675907453534e-05,
"loss": 0.0443,
"step": 920
},
{
"epoch": 3.6328125,
"grad_norm": 0.29903239011764526,
"learning_rate": 7.548319686510487e-05,
"loss": 0.0481,
"step": 930
},
{
"epoch": 3.671875,
"grad_norm": 0.34350693225860596,
"learning_rate": 7.492538238918116e-05,
"loss": 0.0456,
"step": 940
},
{
"epoch": 3.7109375,
"grad_norm": 0.2822878956794739,
"learning_rate": 7.436340872675792e-05,
"loss": 0.0439,
"step": 950
},
{
"epoch": 3.75,
"grad_norm": 0.3564528226852417,
"learning_rate": 7.379736965185368e-05,
"loss": 0.0495,
"step": 960
},
{
"epoch": 3.7890625,
"grad_norm": 0.44323813915252686,
"learning_rate": 7.322735961686409e-05,
"loss": 0.0497,
"step": 970
},
{
"epoch": 3.828125,
"grad_norm": 0.4935294985771179,
"learning_rate": 7.265347373680102e-05,
"loss": 0.0474,
"step": 980
},
{
"epoch": 3.8671875,
"grad_norm": 0.3851205110549927,
"learning_rate": 7.20758077734212e-05,
"loss": 0.0475,
"step": 990
},
{
"epoch": 3.90625,
"grad_norm": 0.4426199495792389,
"learning_rate": 7.149445811924685e-05,
"loss": 0.0467,
"step": 1000
},
{
"epoch": 3.9453125,
"grad_norm": 0.4967394769191742,
"learning_rate": 7.090952178148112e-05,
"loss": 0.0508,
"step": 1010
},
{
"epoch": 3.984375,
"grad_norm": 0.3541552722454071,
"learning_rate": 7.032109636582096e-05,
"loss": 0.0457,
"step": 1020
},
{
"epoch": 4.0234375,
"grad_norm": 0.4096279740333557,
"learning_rate": 6.972928006017001e-05,
"loss": 0.0446,
"step": 1030
},
{
"epoch": 4.0625,
"grad_norm": 0.34364351630210876,
"learning_rate": 6.91341716182545e-05,
"loss": 0.0474,
"step": 1040
},
{
"epoch": 4.1015625,
"grad_norm": 0.7376656532287598,
"learning_rate": 6.853587034314463e-05,
"loss": 0.0452,
"step": 1050
},
{
"epoch": 4.140625,
"grad_norm": 0.46325570344924927,
"learning_rate": 6.793447607068437e-05,
"loss": 0.0426,
"step": 1060
},
{
"epoch": 4.1796875,
"grad_norm": 0.44051945209503174,
"learning_rate": 6.733008915283216e-05,
"loss": 0.0385,
"step": 1070
},
{
"epoch": 4.21875,
"grad_norm": 0.4191512167453766,
"learning_rate": 6.672281044091582e-05,
"loss": 0.0468,
"step": 1080
},
{
"epoch": 4.2578125,
"grad_norm": 0.23939189314842224,
"learning_rate": 6.611274126880378e-05,
"loss": 0.0417,
"step": 1090
},
{
"epoch": 4.296875,
"grad_norm": 0.2203720659017563,
"learning_rate": 6.549998343599601e-05,
"loss": 0.0457,
"step": 1100
},
{
"epoch": 4.3359375,
"grad_norm": 0.4468579888343811,
"learning_rate": 6.488463919063727e-05,
"loss": 0.0459,
"step": 1110
},
{
"epoch": 4.375,
"grad_norm": 0.39386874437332153,
"learning_rate": 6.426681121245527e-05,
"loss": 0.0439,
"step": 1120
},
{
"epoch": 4.4140625,
"grad_norm": 0.29376569390296936,
"learning_rate": 6.364660259562713e-05,
"loss": 0.0439,
"step": 1130
},
{
"epoch": 4.453125,
"grad_norm": 0.24146665632724762,
"learning_rate": 6.302411683157637e-05,
"loss": 0.0429,
"step": 1140
},
{
"epoch": 4.4921875,
"grad_norm": 0.3997792899608612,
"learning_rate": 6.239945779170389e-05,
"loss": 0.0463,
"step": 1150
},
{
"epoch": 4.53125,
"grad_norm": 0.302913635969162,
"learning_rate": 6.177272971005528e-05,
"loss": 0.0503,
"step": 1160
},
{
"epoch": 4.5703125,
"grad_norm": 0.4844031035900116,
"learning_rate": 6.114403716592794e-05,
"loss": 0.0489,
"step": 1170
},
{
"epoch": 4.609375,
"grad_norm": 0.34038305282592773,
"learning_rate": 6.051348506642017e-05,
"loss": 0.0436,
"step": 1180
},
{
"epoch": 4.6484375,
"grad_norm": 0.5346148610115051,
"learning_rate": 5.988117862892601e-05,
"loss": 0.0456,
"step": 1190
},
{
"epoch": 4.6875,
"grad_norm": 0.28704309463500977,
"learning_rate": 5.924722336357793e-05,
"loss": 0.0421,
"step": 1200
},
{
"epoch": 4.7265625,
"grad_norm": 0.2789936065673828,
"learning_rate": 5.861172505564091e-05,
"loss": 0.0414,
"step": 1210
},
{
"epoch": 4.765625,
"grad_norm": 0.29666388034820557,
"learning_rate": 5.797478974786055e-05,
"loss": 0.0476,
"step": 1220
},
{
"epoch": 4.8046875,
"grad_norm": 0.520137369632721,
"learning_rate": 5.733652372276809e-05,
"loss": 0.0399,
"step": 1230
},
{
"epoch": 4.84375,
"grad_norm": 0.29389891028404236,
"learning_rate": 5.669703348494564e-05,
"loss": 0.0426,
"step": 1240
},
{
"epoch": 4.8828125,
"grad_norm": 0.4732462465763092,
"learning_rate": 5.605642574325413e-05,
"loss": 0.0436,
"step": 1250
},
{
"epoch": 4.921875,
"grad_norm": 0.6422872543334961,
"learning_rate": 5.5414807393027376e-05,
"loss": 0.0385,
"step": 1260
},
{
"epoch": 4.9609375,
"grad_norm": 0.31786808371543884,
"learning_rate": 5.47722854982349e-05,
"loss": 0.0449,
"step": 1270
},
{
"epoch": 5.0,
"grad_norm": 0.7411214113235474,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.0379,
"step": 1280
},
{
"epoch": 5.0390625,
"grad_norm": 0.440328449010849,
"learning_rate": 5.348496006679244e-05,
"loss": 0.0404,
"step": 1290
},
{
"epoch": 5.078125,
"grad_norm": 0.40569090843200684,
"learning_rate": 5.284037134034964e-05,
"loss": 0.0481,
"step": 1300
},
{
"epoch": 5.1171875,
"grad_norm": 0.23646467924118042,
"learning_rate": 5.2195308653911035e-05,
"loss": 0.0365,
"step": 1310
},
{
"epoch": 5.15625,
"grad_norm": 0.41665926575660706,
"learning_rate": 5.154987964618704e-05,
"loss": 0.0364,
"step": 1320
},
{
"epoch": 5.1953125,
"grad_norm": 0.24862350523471832,
"learning_rate": 5.090419201701445e-05,
"loss": 0.041,
"step": 1330
},
{
"epoch": 5.234375,
"grad_norm": 0.3748071491718292,
"learning_rate": 5.025835350938506e-05,
"loss": 0.0477,
"step": 1340
},
{
"epoch": 5.2734375,
"grad_norm": 0.40436264872550964,
"learning_rate": 4.961247189146707e-05,
"loss": 0.0348,
"step": 1350
},
{
"epoch": 5.3125,
"grad_norm": 0.4232139587402344,
"learning_rate": 4.8966654938622295e-05,
"loss": 0.0367,
"step": 1360
},
{
"epoch": 5.3515625,
"grad_norm": 0.26344922184944153,
"learning_rate": 4.832101041542219e-05,
"loss": 0.0355,
"step": 1370
},
{
"epoch": 5.390625,
"grad_norm": 0.4235900044441223,
"learning_rate": 4.7675646057665644e-05,
"loss": 0.0432,
"step": 1380
},
{
"epoch": 5.4296875,
"grad_norm": 0.5052417516708374,
"learning_rate": 4.7030669554401596e-05,
"loss": 0.0353,
"step": 1390
},
{
"epoch": 5.46875,
"grad_norm": 0.38285690546035767,
"learning_rate": 4.638618852995947e-05,
"loss": 0.0401,
"step": 1400
},
{
"epoch": 5.5078125,
"grad_norm": 0.6338162422180176,
"learning_rate": 4.574231052599034e-05,
"loss": 0.0418,
"step": 1410
},
{
"epoch": 5.546875,
"grad_norm": 0.3230713903903961,
"learning_rate": 4.509914298352197e-05,
"loss": 0.0372,
"step": 1420
},
{
"epoch": 5.5859375,
"grad_norm": 0.3894837498664856,
"learning_rate": 4.445679322503063e-05,
"loss": 0.0387,
"step": 1430
},
{
"epoch": 5.625,
"grad_norm": 0.3093172609806061,
"learning_rate": 4.381536843653262e-05,
"loss": 0.0363,
"step": 1440
},
{
"epoch": 5.6640625,
"grad_norm": 0.4512802064418793,
"learning_rate": 4.317497564969868e-05,
"loss": 0.0382,
"step": 1450
},
{
"epoch": 5.703125,
"grad_norm": 0.3944973051548004,
"learning_rate": 4.253572172399407e-05,
"loss": 0.0413,
"step": 1460
},
{
"epoch": 5.7421875,
"grad_norm": 0.4658920466899872,
"learning_rate": 4.18977133288474e-05,
"loss": 0.0363,
"step": 1470
},
{
"epoch": 5.78125,
"grad_norm": 0.3259652554988861,
"learning_rate": 4.126105692585121e-05,
"loss": 0.0372,
"step": 1480
},
{
"epoch": 5.8203125,
"grad_norm": 0.3776373863220215,
"learning_rate": 4.0625858750997184e-05,
"loss": 0.0366,
"step": 1490
},
{
"epoch": 5.859375,
"grad_norm": 0.22152118384838104,
"learning_rate": 3.999222479694901e-05,
"loss": 0.038,
"step": 1500
},
{
"epoch": 5.8984375,
"grad_norm": 0.3621926009654999,
"learning_rate": 3.936026079535593e-05,
"loss": 0.0402,
"step": 1510
},
{
"epoch": 5.9375,
"grad_norm": 0.3338565230369568,
"learning_rate": 3.87300721992097e-05,
"loss": 0.0417,
"step": 1520
},
{
"epoch": 5.9765625,
"grad_norm": 0.5546092391014099,
"learning_rate": 3.810176416524812e-05,
"loss": 0.0357,
"step": 1530
},
{
"epoch": 6.015625,
"grad_norm": 0.8308889865875244,
"learning_rate": 3.747544153640809e-05,
"loss": 0.0382,
"step": 1540
},
{
"epoch": 6.0546875,
"grad_norm": 0.24918757379055023,
"learning_rate": 3.6851208824330844e-05,
"loss": 0.0361,
"step": 1550
},
{
"epoch": 6.09375,
"grad_norm": 0.5025043487548828,
"learning_rate": 3.622917019192261e-05,
"loss": 0.0385,
"step": 1560
},
{
"epoch": 6.1328125,
"grad_norm": 0.30080971121788025,
"learning_rate": 3.560942943597344e-05,
"loss": 0.0378,
"step": 1570
},
{
"epoch": 6.171875,
"grad_norm": 0.2558280825614929,
"learning_rate": 3.4992089969837014e-05,
"loss": 0.0334,
"step": 1580
},
{
"epoch": 6.2109375,
"grad_norm": 0.3407670259475708,
"learning_rate": 3.437725480617462e-05,
"loss": 0.0319,
"step": 1590
},
{
"epoch": 6.25,
"grad_norm": 0.33768805861473083,
"learning_rate": 3.3765026539765834e-05,
"loss": 0.0372,
"step": 1600
},
{
"epoch": 6.2890625,
"grad_norm": 0.3476319909095764,
"learning_rate": 3.3155507330389e-05,
"loss": 0.0369,
"step": 1610
},
{
"epoch": 6.328125,
"grad_norm": 0.45832690596580505,
"learning_rate": 3.254879888577432e-05,
"loss": 0.0308,
"step": 1620
},
{
"epoch": 6.3671875,
"grad_norm": 0.3107905387878418,
"learning_rate": 3.194500244463229e-05,
"loss": 0.0344,
"step": 1630
},
{
"epoch": 6.40625,
"grad_norm": 0.30494946241378784,
"learning_rate": 3.13442187597605e-05,
"loss": 0.0365,
"step": 1640
},
{
"epoch": 6.4453125,
"grad_norm": 0.4013335406780243,
"learning_rate": 3.074654808123143e-05,
"loss": 0.0357,
"step": 1650
},
{
"epoch": 6.484375,
"grad_norm": 0.49742478132247925,
"learning_rate": 3.0152090139664174e-05,
"loss": 0.0339,
"step": 1660
},
{
"epoch": 6.5234375,
"grad_norm": 0.3577755391597748,
"learning_rate": 2.9560944129582896e-05,
"loss": 0.0399,
"step": 1670
},
{
"epoch": 6.5625,
"grad_norm": 0.40309587121009827,
"learning_rate": 2.8973208692864624e-05,
"loss": 0.0347,
"step": 1680
},
{
"epoch": 6.6015625,
"grad_norm": 0.2728106677532196,
"learning_rate": 2.838898190227931e-05,
"loss": 0.0333,
"step": 1690
},
{
"epoch": 6.640625,
"grad_norm": 0.2323320060968399,
"learning_rate": 2.780836124512494e-05,
"loss": 0.0362,
"step": 1700
},
{
"epoch": 6.6796875,
"grad_norm": 0.3762952387332916,
"learning_rate": 2.7231443606960238e-05,
"loss": 0.0352,
"step": 1710
},
{
"epoch": 6.71875,
"grad_norm": 0.31488674879074097,
"learning_rate": 2.6658325255437744e-05,
"loss": 0.0318,
"step": 1720
},
{
"epoch": 6.7578125,
"grad_norm": 0.3722898066043854,
"learning_rate": 2.6089101824240138e-05,
"loss": 0.0341,
"step": 1730
},
{
"epoch": 6.796875,
"grad_norm": 0.41492217779159546,
"learning_rate": 2.5523868297122265e-05,
"loss": 0.0446,
"step": 1740
},
{
"epoch": 6.8359375,
"grad_norm": 0.2935633361339569,
"learning_rate": 2.4962718992061634e-05,
"loss": 0.0381,
"step": 1750
},
{
"epoch": 6.875,
"grad_norm": 0.30372825264930725,
"learning_rate": 2.4405747545519963e-05,
"loss": 0.0369,
"step": 1760
},
{
"epoch": 6.9140625,
"grad_norm": 0.265733540058136,
"learning_rate": 2.385304689681847e-05,
"loss": 0.0319,
"step": 1770
},
{
"epoch": 6.953125,
"grad_norm": 0.22491760551929474,
"learning_rate": 2.3304709272629584e-05,
"loss": 0.0333,
"step": 1780
},
{
"epoch": 6.9921875,
"grad_norm": 0.3895363211631775,
"learning_rate": 2.276082617158743e-05,
"loss": 0.0367,
"step": 1790
},
{
"epoch": 7.03125,
"grad_norm": 0.24833568930625916,
"learning_rate": 2.2221488349019903e-05,
"loss": 0.0292,
"step": 1800
},
{
"epoch": 7.0703125,
"grad_norm": 0.3045443594455719,
"learning_rate": 2.1686785801804653e-05,
"loss": 0.0322,
"step": 1810
},
{
"epoch": 7.109375,
"grad_norm": 0.2556830644607544,
"learning_rate": 2.115680775335184e-05,
"loss": 0.0325,
"step": 1820
},
{
"epoch": 7.1484375,
"grad_norm": 0.28755396604537964,
"learning_rate": 2.063164263871573e-05,
"loss": 0.0333,
"step": 1830
},
{
"epoch": 7.1875,
"grad_norm": 0.4773646593093872,
"learning_rate": 2.0111378089837956e-05,
"loss": 0.0333,
"step": 1840
},
{
"epoch": 7.2265625,
"grad_norm": 0.375969260931015,
"learning_rate": 1.959610092092479e-05,
"loss": 0.0322,
"step": 1850
},
{
"epoch": 7.265625,
"grad_norm": 0.2566347122192383,
"learning_rate": 1.9085897113960793e-05,
"loss": 0.0312,
"step": 1860
},
{
"epoch": 7.3046875,
"grad_norm": 0.513367235660553,
"learning_rate": 1.858085180436147e-05,
"loss": 0.0337,
"step": 1870
},
{
"epoch": 7.34375,
"grad_norm": 0.31472358107566833,
"learning_rate": 1.8081049266767046e-05,
"loss": 0.0302,
"step": 1880
},
{
"epoch": 7.3828125,
"grad_norm": 0.31810522079467773,
"learning_rate": 1.758657290097997e-05,
"loss": 0.03,
"step": 1890
},
{
"epoch": 7.421875,
"grad_norm": 0.3877921998500824,
"learning_rate": 1.709750521804831e-05,
"loss": 0.0384,
"step": 1900
},
{
"epoch": 7.4609375,
"grad_norm": 0.45386990904808044,
"learning_rate": 1.661392782649755e-05,
"loss": 0.037,
"step": 1910
},
{
"epoch": 7.5,
"grad_norm": 0.33699849247932434,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.0316,
"step": 1920
},
{
"epoch": 7.5390625,
"grad_norm": 0.31667983531951904,
"learning_rate": 1.5663565757474773e-05,
"loss": 0.029,
"step": 1930
},
{
"epoch": 7.578125,
"grad_norm": 0.3529506325721741,
"learning_rate": 1.5196939662648569e-05,
"loss": 0.0353,
"step": 1940
},
{
"epoch": 7.6171875,
"grad_norm": 0.3233952820301056,
"learning_rate": 1.4736120998032949e-05,
"loss": 0.0331,
"step": 1950
},
{
"epoch": 7.65625,
"grad_norm": 0.439943790435791,
"learning_rate": 1.428118665836668e-05,
"loss": 0.0296,
"step": 1960
},
{
"epoch": 7.6953125,
"grad_norm": 0.25135713815689087,
"learning_rate": 1.3832212556497748e-05,
"loss": 0.0312,
"step": 1970
},
{
"epoch": 7.734375,
"grad_norm": 0.4036009609699249,
"learning_rate": 1.338927361071603e-05,
"loss": 0.0324,
"step": 1980
},
{
"epoch": 7.7734375,
"grad_norm": 0.27127811312675476,
"learning_rate": 1.2952443732252057e-05,
"loss": 0.0305,
"step": 1990
},
{
"epoch": 7.8125,
"grad_norm": 0.569515585899353,
"learning_rate": 1.2521795812943704e-05,
"loss": 0.0286,
"step": 2000
},
{
"epoch": 7.8515625,
"grad_norm": 0.7388462424278259,
"learning_rate": 1.2097401713073175e-05,
"loss": 0.0329,
"step": 2010
},
{
"epoch": 7.890625,
"grad_norm": 0.35602420568466187,
"learning_rate": 1.1679332249375923e-05,
"loss": 0.032,
"step": 2020
},
{
"epoch": 7.9296875,
"grad_norm": 0.23437829315662384,
"learning_rate": 1.1267657183223812e-05,
"loss": 0.0325,
"step": 2030
},
{
"epoch": 7.96875,
"grad_norm": 0.8530910015106201,
"learning_rate": 1.086244520898428e-05,
"loss": 0.0365,
"step": 2040
},
{
"epoch": 8.0078125,
"grad_norm": 0.44752374291419983,
"learning_rate": 1.046376394255773e-05,
"loss": 0.0309,
"step": 2050
},
{
"epoch": 8.046875,
"grad_norm": 0.5457649230957031,
"learning_rate": 1.00716799100947e-05,
"loss": 0.0317,
"step": 2060
},
{
"epoch": 8.0859375,
"grad_norm": 0.4462254047393799,
"learning_rate": 9.686258536894948e-06,
"loss": 0.0286,
"step": 2070
},
{
"epoch": 8.125,
"grad_norm": 0.2501213252544403,
"learning_rate": 9.307564136490254e-06,
"loss": 0.0351,
"step": 2080
},
{
"epoch": 8.1640625,
"grad_norm": 0.49755340814590454,
"learning_rate": 8.935659899912658e-06,
"loss": 0.0328,
"step": 2090
},
{
"epoch": 8.203125,
"grad_norm": 0.4353477954864502,
"learning_rate": 8.5706078851501e-06,
"loss": 0.0306,
"step": 2100
},
{
"epoch": 8.2421875,
"grad_norm": 0.2827202379703522,
"learning_rate": 8.21246900679109e-06,
"loss": 0.0316,
"step": 2110
},
{
"epoch": 8.28125,
"grad_norm": 0.26606473326683044,
"learning_rate": 7.861303025860135e-06,
"loss": 0.0254,
"step": 2120
},
{
"epoch": 8.3203125,
"grad_norm": 0.251438170671463,
"learning_rate": 7.517168539845709e-06,
"loss": 0.0258,
"step": 2130
},
{
"epoch": 8.359375,
"grad_norm": 0.38992422819137573,
"learning_rate": 7.180122972922304e-06,
"loss": 0.03,
"step": 2140
},
{
"epoch": 8.3984375,
"grad_norm": 0.4747330844402313,
"learning_rate": 6.8502225663683975e-06,
"loss": 0.0305,
"step": 2150
},
{
"epoch": 8.4375,
"grad_norm": 0.29881390929222107,
"learning_rate": 6.527522369181655e-06,
"loss": 0.0255,
"step": 2160
},
{
"epoch": 8.4765625,
"grad_norm": 0.696563720703125,
"learning_rate": 6.212076228893205e-06,
"loss": 0.0286,
"step": 2170
},
{
"epoch": 8.515625,
"grad_norm": 0.26265567541122437,
"learning_rate": 5.903936782582253e-06,
"loss": 0.033,
"step": 2180
},
{
"epoch": 8.5546875,
"grad_norm": 0.42761778831481934,
"learning_rate": 5.603155448092895e-06,
"loss": 0.0296,
"step": 2190
},
{
"epoch": 8.59375,
"grad_norm": 0.2030072659254074,
"learning_rate": 5.309782415454151e-06,
"loss": 0.0289,
"step": 2200
},
{
"epoch": 8.6328125,
"grad_norm": 0.2712879776954651,
"learning_rate": 5.023866638505031e-06,
"loss": 0.0354,
"step": 2210
},
{
"epoch": 8.671875,
"grad_norm": 0.26241105794906616,
"learning_rate": 4.745455826725825e-06,
"loss": 0.0316,
"step": 2220
},
{
"epoch": 8.7109375,
"grad_norm": 0.2872694134712219,
"learning_rate": 4.474596437276996e-06,
"loss": 0.03,
"step": 2230
},
{
"epoch": 8.75,
"grad_norm": 0.28568607568740845,
"learning_rate": 4.2113336672471245e-06,
"loss": 0.0294,
"step": 2240
},
{
"epoch": 8.7890625,
"grad_norm": 0.3385384976863861,
"learning_rate": 3.955711446111082e-06,
"loss": 0.0304,
"step": 2250
},
{
"epoch": 8.828125,
"grad_norm": 0.675881564617157,
"learning_rate": 3.707772428399681e-06,
"loss": 0.0334,
"step": 2260
},
{
"epoch": 8.8671875,
"grad_norm": 0.5481550097465515,
"learning_rate": 3.467557986582104e-06,
"loss": 0.0297,
"step": 2270
},
{
"epoch": 8.90625,
"grad_norm": 0.26067742705345154,
"learning_rate": 3.2351082041623125e-06,
"loss": 0.0287,
"step": 2280
},
{
"epoch": 8.9453125,
"grad_norm": 0.5334944128990173,
"learning_rate": 3.0104618689904386e-06,
"loss": 0.0322,
"step": 2290
},
{
"epoch": 8.984375,
"grad_norm": 0.37815043330192566,
"learning_rate": 2.793656466790473e-06,
"loss": 0.0255,
"step": 2300
},
{
"epoch": 9.0234375,
"grad_norm": 0.1792028248310089,
"learning_rate": 2.584728174905171e-06,
"loss": 0.029,
"step": 2310
},
{
"epoch": 9.0625,
"grad_norm": 0.8325735926628113,
"learning_rate": 2.3837118562592797e-06,
"loss": 0.0301,
"step": 2320
},
{
"epoch": 9.1015625,
"grad_norm": 0.28852543234825134,
"learning_rate": 2.1906410535421697e-06,
"loss": 0.0303,
"step": 2330
},
{
"epoch": 9.140625,
"grad_norm": 0.5437615513801575,
"learning_rate": 2.005547983610684e-06,
"loss": 0.0319,
"step": 2340
},
{
"epoch": 9.1796875,
"grad_norm": 0.2050412893295288,
"learning_rate": 1.828463532113256e-06,
"loss": 0.029,
"step": 2350
},
{
"epoch": 9.21875,
"grad_norm": 0.38466140627861023,
"learning_rate": 1.6594172483361758e-06,
"loss": 0.0261,
"step": 2360
},
{
"epoch": 9.2578125,
"grad_norm": 0.30740851163864136,
"learning_rate": 1.4984373402728014e-06,
"loss": 0.0258,
"step": 2370
},
{
"epoch": 9.296875,
"grad_norm": 0.33520451188087463,
"learning_rate": 1.3455506699166732e-06,
"loss": 0.0305,
"step": 2380
},
{
"epoch": 9.3359375,
"grad_norm": 0.2344502955675125,
"learning_rate": 1.2007827487791112e-06,
"loss": 0.0307,
"step": 2390
},
{
"epoch": 9.375,
"grad_norm": 0.4900089502334595,
"learning_rate": 1.064157733632276e-06,
"loss": 0.0305,
"step": 2400
},
{
"epoch": 9.4140625,
"grad_norm": 0.34539440274238586,
"learning_rate": 9.35698422478204e-07,
"loss": 0.0272,
"step": 2410
},
{
"epoch": 9.453125,
"grad_norm": 0.4583103358745575,
"learning_rate": 8.154262507446109e-07,
"loss": 0.0279,
"step": 2420
},
{
"epoch": 9.4921875,
"grad_norm": 0.39252835512161255,
"learning_rate": 7.033612877080819e-07,
"loss": 0.0339,
"step": 2430
},
{
"epoch": 9.53125,
"grad_norm": 0.6091126799583435,
"learning_rate": 5.995222331451722e-07,
"loss": 0.028,
"step": 2440
},
{
"epoch": 9.5703125,
"grad_norm": 0.36491745710372925,
"learning_rate": 5.039264142120803e-07,
"loss": 0.0295,
"step": 2450
},
{
"epoch": 9.609375,
"grad_norm": 0.31240883469581604,
"learning_rate": 4.165897825533227e-07,
"loss": 0.028,
"step": 2460
},
{
"epoch": 9.6484375,
"grad_norm": 0.35602691769599915,
"learning_rate": 3.375269116399793e-07,
"loss": 0.0359,
"step": 2470
},
{
"epoch": 9.6875,
"grad_norm": 0.2611185312271118,
"learning_rate": 2.667509943378721e-07,
"loss": 0.0244,
"step": 2480
},
{
"epoch": 9.7265625,
"grad_norm": 0.3753460943698883,
"learning_rate": 2.0427384070612065e-07,
"loss": 0.0278,
"step": 2490
},
{
"epoch": 9.765625,
"grad_norm": 0.4795132577419281,
"learning_rate": 1.5010587602647397e-07,
"loss": 0.0324,
"step": 2500
},
{
"epoch": 9.8046875,
"grad_norm": 0.4804089069366455,
"learning_rate": 1.0425613906365761e-07,
"loss": 0.0289,
"step": 2510
},
{
"epoch": 9.84375,
"grad_norm": 0.3166308104991913,
"learning_rate": 6.673228055715241e-08,
"loss": 0.0289,
"step": 2520
},
{
"epoch": 9.8828125,
"grad_norm": 0.3317022919654846,
"learning_rate": 3.754056194452682e-08,
"loss": 0.0327,
"step": 2530
},
{
"epoch": 9.921875,
"grad_norm": 0.2820194363594055,
"learning_rate": 1.668585431663372e-08,
"loss": 0.0268,
"step": 2540
},
{
"epoch": 9.9609375,
"grad_norm": 0.3767685890197754,
"learning_rate": 4.171637604760692e-09,
"loss": 0.0298,
"step": 2550
},
{
"epoch": 10.0,
"grad_norm": 0.375841349363327,
"learning_rate": 0.0,
"loss": 0.0277,
"step": 2560
},
{
"epoch": 10.0,
"step": 2560,
"total_flos": 2.6088527768247245e+17,
"train_loss": 0.060091388958971946,
"train_runtime": 2712.9359,
"train_samples_per_second": 46.127,
"train_steps_per_second": 0.944
}
],
"logging_steps": 10,
"max_steps": 2560,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.6088527768247245e+17,
"train_batch_size": 49,
"trial_name": null,
"trial_params": null
}