{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1941,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.015455950540958269,
"grad_norm": 9.875,
"learning_rate": 7.627118644067798e-06,
"loss": 3.7349,
"step": 10
},
{
"epoch": 0.030911901081916538,
"grad_norm": 8.625,
"learning_rate": 1.6101694915254237e-05,
"loss": 3.5453,
"step": 20
},
{
"epoch": 0.04636785162287481,
"grad_norm": 7.40625,
"learning_rate": 2.457627118644068e-05,
"loss": 3.441,
"step": 30
},
{
"epoch": 0.061823802163833076,
"grad_norm": 5.71875,
"learning_rate": 3.305084745762712e-05,
"loss": 3.4128,
"step": 40
},
{
"epoch": 0.07727975270479134,
"grad_norm": 6.21875,
"learning_rate": 4.152542372881356e-05,
"loss": 3.1485,
"step": 50
},
{
"epoch": 0.09273570324574962,
"grad_norm": 5.0,
"learning_rate": 5e-05,
"loss": 3.0359,
"step": 60
},
{
"epoch": 0.10819165378670788,
"grad_norm": 4.1875,
"learning_rate": 4.999651694417285e-05,
"loss": 3.2478,
"step": 70
},
{
"epoch": 0.12364760432766615,
"grad_norm": 3.5,
"learning_rate": 4.9986068747225644e-05,
"loss": 3.1223,
"step": 80
},
{
"epoch": 0.1391035548686244,
"grad_norm": 3.890625,
"learning_rate": 4.9968658320490636e-05,
"loss": 3.0477,
"step": 90
},
{
"epoch": 0.1545595054095827,
"grad_norm": 3.859375,
"learning_rate": 4.994429051528689e-05,
"loss": 3.0829,
"step": 100
},
{
"epoch": 0.17001545595054096,
"grad_norm": 3.734375,
"learning_rate": 4.991297212156848e-05,
"loss": 2.9565,
"step": 110
},
{
"epoch": 0.18547140649149924,
"grad_norm": 3.671875,
"learning_rate": 4.9874711866032495e-05,
"loss": 3.142,
"step": 120
},
{
"epoch": 0.2009273570324575,
"grad_norm": 5.15625,
"learning_rate": 4.982952040968744e-05,
"loss": 3.0807,
"step": 130
},
{
"epoch": 0.21638330757341576,
"grad_norm": 4.625,
"learning_rate": 4.977741034488251e-05,
"loss": 2.9314,
"step": 140
},
{
"epoch": 0.23183925811437403,
"grad_norm": 4.125,
"learning_rate": 4.971839619179892e-05,
"loss": 3.1325,
"step": 150
},
{
"epoch": 0.2472952086553323,
"grad_norm": 4.65625,
"learning_rate": 4.965249439440384e-05,
"loss": 2.8936,
"step": 160
},
{
"epoch": 0.26275115919629055,
"grad_norm": 5.0625,
"learning_rate": 4.957972331586843e-05,
"loss": 2.9205,
"step": 170
},
{
"epoch": 0.2782071097372488,
"grad_norm": 4.34375,
"learning_rate": 4.950010323345103e-05,
"loss": 2.9776,
"step": 180
},
{
"epoch": 0.2936630602782071,
"grad_norm": 4.53125,
"learning_rate": 4.941365633284699e-05,
"loss": 2.9432,
"step": 190
},
{
"epoch": 0.3091190108191654,
"grad_norm": 4.25,
"learning_rate": 4.932040670200677e-05,
"loss": 2.9593,
"step": 200
},
{
"epoch": 0.32457496136012365,
"grad_norm": 5.03125,
"learning_rate": 4.9220380324424006e-05,
"loss": 2.9826,
"step": 210
},
{
"epoch": 0.3400309119010819,
"grad_norm": 4.28125,
"learning_rate": 4.911360507189526e-05,
"loss": 2.8009,
"step": 220
},
{
"epoch": 0.3554868624420402,
"grad_norm": 4.5625,
"learning_rate": 4.900011069675378e-05,
"loss": 2.7733,
"step": 230
},
{
"epoch": 0.37094281298299847,
"grad_norm": 3.609375,
"learning_rate": 4.8879928823579136e-05,
"loss": 2.8954,
"step": 240
},
{
"epoch": 0.38639876352395675,
"grad_norm": 3.1875,
"learning_rate": 4.875309294038523e-05,
"loss": 2.8972,
"step": 250
},
{
"epoch": 0.401854714064915,
"grad_norm": 4.5,
"learning_rate": 4.8619638389289026e-05,
"loss": 2.9377,
"step": 260
},
{
"epoch": 0.41731066460587324,
"grad_norm": 3.796875,
"learning_rate": 4.8479602356662665e-05,
"loss": 2.8918,
"step": 270
},
{
"epoch": 0.4327666151468315,
"grad_norm": 5.0625,
"learning_rate": 4.833302386277171e-05,
"loss": 2.8245,
"step": 280
},
{
"epoch": 0.4482225656877898,
"grad_norm": 3.03125,
"learning_rate": 4.817994375090233e-05,
"loss": 2.8384,
"step": 290
},
{
"epoch": 0.46367851622874806,
"grad_norm": 3.390625,
"learning_rate": 4.80204046759806e-05,
"loss": 2.9315,
"step": 300
},
{
"epoch": 0.47913446676970634,
"grad_norm": 3.75,
"learning_rate": 4.785445109268687e-05,
"loss": 2.8416,
"step": 310
},
{
"epoch": 0.4945904173106646,
"grad_norm": 3.734375,
"learning_rate": 4.768212924306877e-05,
"loss": 2.8615,
"step": 320
},
{
"epoch": 0.5100463678516228,
"grad_norm": 3.28125,
"learning_rate": 4.75034871436561e-05,
"loss": 2.8885,
"step": 330
},
{
"epoch": 0.5255023183925811,
"grad_norm": 3.96875,
"learning_rate": 4.7318574572081275e-05,
"loss": 2.8238,
"step": 340
},
{
"epoch": 0.5409582689335394,
"grad_norm": 4.53125,
"learning_rate": 4.712744305320911e-05,
"loss": 2.816,
"step": 350
},
{
"epoch": 0.5564142194744977,
"grad_norm": 2.9375,
"learning_rate": 4.6930145844779626e-05,
"loss": 2.6657,
"step": 360
},
{
"epoch": 0.5718701700154559,
"grad_norm": 3.78125,
"learning_rate": 4.672673792256816e-05,
"loss": 2.8454,
"step": 370
},
{
"epoch": 0.5873261205564142,
"grad_norm": 3.375,
"learning_rate": 4.651727596506661e-05,
"loss": 2.8263,
"step": 380
},
{
"epoch": 0.6027820710973725,
"grad_norm": 4.03125,
"learning_rate": 4.63018183376903e-05,
"loss": 2.9417,
"step": 390
},
{
"epoch": 0.6182380216383307,
"grad_norm": 3.453125,
"learning_rate": 4.60804250765148e-05,
"loss": 2.8449,
"step": 400
},
{
"epoch": 0.633693972179289,
"grad_norm": 3.703125,
"learning_rate": 4.5853157871547184e-05,
"loss": 2.8171,
"step": 410
},
{
"epoch": 0.6491499227202473,
"grad_norm": 3.25,
"learning_rate": 4.562008004953644e-05,
"loss": 2.9253,
"step": 420
},
{
"epoch": 0.6646058732612056,
"grad_norm": 4.5,
"learning_rate": 4.538125655632789e-05,
"loss": 2.8203,
"step": 430
},
{
"epoch": 0.6800618238021638,
"grad_norm": 3.71875,
"learning_rate": 4.5136753938766286e-05,
"loss": 2.8078,
"step": 440
},
{
"epoch": 0.6955177743431221,
"grad_norm": 3.265625,
"learning_rate": 4.4886640326152985e-05,
"loss": 2.7962,
"step": 450
},
{
"epoch": 0.7109737248840804,
"grad_norm": 4.1875,
"learning_rate": 4.4630985411262054e-05,
"loss": 2.7565,
"step": 460
},
{
"epoch": 0.7264296754250387,
"grad_norm": 4.0,
"learning_rate": 4.4369860430920776e-05,
"loss": 2.7677,
"step": 470
},
{
"epoch": 0.7418856259659969,
"grad_norm": 3.0,
"learning_rate": 4.41033381461599e-05,
"loss": 2.7879,
"step": 480
},
{
"epoch": 0.7573415765069552,
"grad_norm": 4.71875,
"learning_rate": 4.383149282193919e-05,
"loss": 2.6332,
"step": 490
},
{
"epoch": 0.7727975270479135,
"grad_norm": 4.96875,
"learning_rate": 4.35544002064539e-05,
"loss": 2.7762,
"step": 500
},
{
"epoch": 0.7882534775888718,
"grad_norm": 4.375,
"learning_rate": 4.327213751002794e-05,
"loss": 2.8478,
"step": 510
},
{
"epoch": 0.80370942812983,
"grad_norm": 4.0625,
"learning_rate": 4.298478338359968e-05,
"loss": 2.7518,
"step": 520
},
{
"epoch": 0.8191653786707882,
"grad_norm": 4.40625,
"learning_rate": 4.269241789680629e-05,
"loss": 2.747,
"step": 530
},
{
"epoch": 0.8346213292117465,
"grad_norm": 3.4375,
"learning_rate": 4.239512251567275e-05,
"loss": 2.7868,
"step": 540
},
{
"epoch": 0.8500772797527048,
"grad_norm": 3.859375,
"learning_rate": 4.2092980079911836e-05,
"loss": 2.694,
"step": 550
},
{
"epoch": 0.865533230293663,
"grad_norm": 5.34375,
"learning_rate": 4.178607477984128e-05,
"loss": 2.693,
"step": 560
},
{
"epoch": 0.8809891808346213,
"grad_norm": 6.53125,
"learning_rate": 4.147449213292457e-05,
"loss": 2.7596,
"step": 570
},
{
"epoch": 0.8964451313755796,
"grad_norm": 3.875,
"learning_rate": 4.115831895994203e-05,
"loss": 2.7299,
"step": 580
},
{
"epoch": 0.9119010819165378,
"grad_norm": 3.796875,
"learning_rate": 4.0837643360798666e-05,
"loss": 2.8138,
"step": 590
},
{
"epoch": 0.9273570324574961,
"grad_norm": 4.21875,
"learning_rate": 4.051255468997562e-05,
"loss": 2.7069,
"step": 600
},
{
"epoch": 0.9428129829984544,
"grad_norm": 4.125,
"learning_rate": 4.018314353163202e-05,
"loss": 2.6954,
"step": 610
},
{
"epoch": 0.9582689335394127,
"grad_norm": 3.78125,
"learning_rate": 3.984950167436423e-05,
"loss": 2.5214,
"step": 620
},
{
"epoch": 0.973724884080371,
"grad_norm": 3.859375,
"learning_rate": 3.9511722085629485e-05,
"loss": 2.7825,
"step": 630
},
{
"epoch": 0.9891808346213292,
"grad_norm": 3.234375,
"learning_rate": 3.916989888584095e-05,
"loss": 2.7346,
"step": 640
},
{
"epoch": 1.0046367851622875,
"grad_norm": 3.9375,
"learning_rate": 3.882412732214167e-05,
"loss": 2.5301,
"step": 650
},
{
"epoch": 1.0200927357032457,
"grad_norm": 3.65625,
"learning_rate": 3.847450374186441e-05,
"loss": 2.1356,
"step": 660
},
{
"epoch": 1.035548686244204,
"grad_norm": 3.78125,
"learning_rate": 3.812112556568508e-05,
"loss": 2.0337,
"step": 670
},
{
"epoch": 1.0510046367851622,
"grad_norm": 4.96875,
"learning_rate": 3.776409126047692e-05,
"loss": 2.1394,
"step": 680
},
{
"epoch": 1.0664605873261206,
"grad_norm": 5.15625,
"learning_rate": 3.740350031187332e-05,
"loss": 1.9865,
"step": 690
},
{
"epoch": 1.0819165378670788,
"grad_norm": 3.671875,
"learning_rate": 3.7039453196546645e-05,
"loss": 2.0067,
"step": 700
},
{
"epoch": 1.0973724884080371,
"grad_norm": 3.65625,
"learning_rate": 3.6672051354211035e-05,
"loss": 2.2291,
"step": 710
},
{
"epoch": 1.1128284389489953,
"grad_norm": 3.421875,
"learning_rate": 3.6301397159356696e-05,
"loss": 2.0544,
"step": 720
},
{
"epoch": 1.1282843894899537,
"grad_norm": 4.3125,
"learning_rate": 3.592759389272389e-05,
"loss": 1.9978,
"step": 730
},
{
"epoch": 1.1437403400309119,
"grad_norm": 4.1875,
"learning_rate": 3.555074571252431e-05,
"loss": 1.9575,
"step": 740
},
{
"epoch": 1.1591962905718702,
"grad_norm": 4.34375,
"learning_rate": 3.517095762541795e-05,
"loss": 2.0236,
"step": 750
},
{
"epoch": 1.1746522411128284,
"grad_norm": 4.34375,
"learning_rate": 3.4788335457253595e-05,
"loss": 1.9811,
"step": 760
},
{
"epoch": 1.1901081916537868,
"grad_norm": 5.25,
"learning_rate": 3.4402985823581046e-05,
"loss": 2.0005,
"step": 770
},
{
"epoch": 1.205564142194745,
"grad_norm": 4.875,
"learning_rate": 3.401501609994326e-05,
"loss": 1.9412,
"step": 780
},
{
"epoch": 1.2210200927357033,
"grad_norm": 4.5625,
"learning_rate": 3.362453439195679e-05,
"loss": 2.0814,
"step": 790
},
{
"epoch": 1.2364760432766615,
"grad_norm": 4.03125,
"learning_rate": 3.323164950518868e-05,
"loss": 1.9947,
"step": 800
},
{
"epoch": 1.2519319938176197,
"grad_norm": 5.71875,
"learning_rate": 3.283647091483849e-05,
"loss": 1.9282,
"step": 810
},
{
"epoch": 1.267387944358578,
"grad_norm": 4.40625,
"learning_rate": 3.243910873523356e-05,
"loss": 1.9283,
"step": 820
},
{
"epoch": 1.2828438948995364,
"grad_norm": 6.84375,
"learning_rate": 3.203967368914631e-05,
"loss": 1.9607,
"step": 830
},
{
"epoch": 1.2982998454404946,
"grad_norm": 6.15625,
"learning_rate": 3.163827707694193e-05,
"loss": 2.0587,
"step": 840
},
{
"epoch": 1.3137557959814528,
"grad_norm": 4.84375,
"learning_rate": 3.123503074556513e-05,
"loss": 2.0544,
"step": 850
},
{
"epoch": 1.3292117465224111,
"grad_norm": 4.40625,
"learning_rate": 3.083004705737468e-05,
"loss": 2.02,
"step": 860
},
{
"epoch": 1.3446676970633695,
"grad_norm": 4.4375,
"learning_rate": 3.0423438858834174e-05,
"loss": 2.0169,
"step": 870
},
{
"epoch": 1.3601236476043277,
"grad_norm": 5.15625,
"learning_rate": 3.0015319449068025e-05,
"loss": 2.0551,
"step": 880
},
{
"epoch": 1.3755795981452859,
"grad_norm": 5.40625,
"learning_rate": 2.960580254829131e-05,
"loss": 1.8902,
"step": 890
},
{
"epoch": 1.3910355486862442,
"grad_norm": 5.28125,
"learning_rate": 2.919500226612224e-05,
"loss": 2.1326,
"step": 900
},
{
"epoch": 1.4064914992272024,
"grad_norm": 5.5625,
"learning_rate": 2.8783033069786132e-05,
"loss": 1.7767,
"step": 910
},
{
"epoch": 1.4219474497681608,
"grad_norm": 4.40625,
"learning_rate": 2.8370009752219788e-05,
"loss": 1.9215,
"step": 920
},
{
"epoch": 1.437403400309119,
"grad_norm": 5.78125,
"learning_rate": 2.795604740008504e-05,
"loss": 1.9508,
"step": 930
},
{
"epoch": 1.4528593508500773,
"grad_norm": 4.59375,
"learning_rate": 2.7541261361700514e-05,
"loss": 2.0011,
"step": 940
},
{
"epoch": 1.4683153013910355,
"grad_norm": 4.34375,
"learning_rate": 2.7125767214900455e-05,
"loss": 2.0049,
"step": 950
},
{
"epoch": 1.4837712519319939,
"grad_norm": 4.875,
"learning_rate": 2.67096807348296e-05,
"loss": 2.0328,
"step": 960
},
{
"epoch": 1.499227202472952,
"grad_norm": 5.375,
"learning_rate": 2.629311786168306e-05,
"loss": 2.0937,
"step": 970
},
{
"epoch": 1.5146831530139102,
"grad_norm": 3.984375,
"learning_rate": 2.5876194668400256e-05,
"loss": 2.0844,
"step": 980
},
{
"epoch": 1.5301391035548686,
"grad_norm": 4.46875,
"learning_rate": 2.545902732832181e-05,
"loss": 1.9383,
"step": 990
},
{
"epoch": 1.545595054095827,
"grad_norm": 4.90625,
"learning_rate": 2.5041732082818503e-05,
"loss": 1.963,
"step": 1000
},
{
"epoch": 1.5610510046367851,
"grad_norm": 4.09375,
"learning_rate": 2.462442520890127e-05,
"loss": 2.0792,
"step": 1010
},
{
"epoch": 1.5765069551777433,
"grad_norm": 5.09375,
"learning_rate": 2.4207222986821194e-05,
"loss": 2.0617,
"step": 1020
},
{
"epoch": 1.5919629057187017,
"grad_norm": 5.3125,
"learning_rate": 2.379024166766876e-05,
"loss": 1.9914,
"step": 1030
},
{
"epoch": 1.60741885625966,
"grad_norm": 5.0,
"learning_rate": 2.3373597440981027e-05,
"loss": 1.9685,
"step": 1040
},
{
"epoch": 1.6228748068006182,
"grad_norm": 5.96875,
"learning_rate": 2.295740640236614e-05,
"loss": 1.9599,
"step": 1050
},
{
"epoch": 1.6383307573415764,
"grad_norm": 6.4375,
"learning_rate": 2.2541784521153875e-05,
"loss": 2.0456,
"step": 1060
},
{
"epoch": 1.6537867078825348,
"grad_norm": 5.625,
"learning_rate": 2.2126847608081434e-05,
"loss": 1.9414,
"step": 1070
},
{
"epoch": 1.6692426584234932,
"grad_norm": 5.25,
"learning_rate": 2.1712711283023482e-05,
"loss": 2.0061,
"step": 1080
},
{
"epoch": 1.6846986089644513,
"grad_norm": 4.96875,
"learning_rate": 2.1299490942775213e-05,
"loss": 2.0977,
"step": 1090
},
{
"epoch": 1.7001545595054095,
"grad_norm": 5.125,
"learning_rate": 2.088730172889777e-05,
"loss": 2.0847,
"step": 1100
},
{
"epoch": 1.7156105100463679,
"grad_norm": 5.65625,
"learning_rate": 2.0476258495634594e-05,
"loss": 2.1084,
"step": 1110
},
{
"epoch": 1.7310664605873263,
"grad_norm": 4.125,
"learning_rate": 2.0066475777908005e-05,
"loss": 1.9382,
"step": 1120
},
{
"epoch": 1.7465224111282844,
"grad_norm": 6.40625,
"learning_rate": 1.9658067759404625e-05,
"loss": 2.0278,
"step": 1130
},
{
"epoch": 1.7619783616692426,
"grad_norm": 5.5625,
"learning_rate": 1.925114824075876e-05,
"loss": 2.0224,
"step": 1140
},
{
"epoch": 1.7774343122102008,
"grad_norm": 4.8125,
"learning_rate": 1.8845830607842438e-05,
"loss": 2.0613,
"step": 1150
},
{
"epoch": 1.7928902627511591,
"grad_norm": 5.375,
"learning_rate": 1.8442227800171125e-05,
"loss": 1.9505,
"step": 1160
},
{
"epoch": 1.8083462132921175,
"grad_norm": 5.46875,
"learning_rate": 1.8040452279433716e-05,
"loss": 1.9729,
"step": 1170
},
{
"epoch": 1.8238021638330757,
"grad_norm": 4.59375,
"learning_rate": 1.7640615998155693e-05,
"loss": 1.9394,
"step": 1180
},
{
"epoch": 1.8392581143740339,
"grad_norm": 4.875,
"learning_rate": 1.7242830368504227e-05,
"loss": 1.8939,
"step": 1190
},
{
"epoch": 1.8547140649149922,
"grad_norm": 3.984375,
"learning_rate": 1.6847206231243718e-05,
"loss": 1.8111,
"step": 1200
},
{
"epoch": 1.8701700154559506,
"grad_norm": 4.9375,
"learning_rate": 1.6453853824850726e-05,
"loss": 1.9524,
"step": 1210
},
{
"epoch": 1.8856259659969088,
"grad_norm": 3.953125,
"learning_rate": 1.606288275479652e-05,
"loss": 2.0018,
"step": 1220
},
{
"epoch": 1.901081916537867,
"grad_norm": 5.15625,
"learning_rate": 1.5674401963006235e-05,
"loss": 2.0096,
"step": 1230
},
{
"epoch": 1.9165378670788253,
"grad_norm": 5.5625,
"learning_rate": 1.5288519697502697e-05,
"loss": 1.9857,
"step": 1240
},
{
"epoch": 1.9319938176197837,
"grad_norm": 6.625,
"learning_rate": 1.4905343482243775e-05,
"loss": 1.9265,
"step": 1250
},
{
"epoch": 1.947449768160742,
"grad_norm": 4.875,
"learning_rate": 1.4524980087161438e-05,
"loss": 2.0875,
"step": 1260
},
{
"epoch": 1.9629057187017,
"grad_norm": 3.484375,
"learning_rate": 1.4147535498410853e-05,
"loss": 2.0144,
"step": 1270
},
{
"epoch": 1.9783616692426584,
"grad_norm": 4.25,
"learning_rate": 1.3773114888837957e-05,
"loss": 1.9803,
"step": 1280
},
{
"epoch": 1.9938176197836168,
"grad_norm": 4.90625,
"learning_rate": 1.3401822588673636e-05,
"loss": 2.0252,
"step": 1290
},
{
"epoch": 2.009273570324575,
"grad_norm": 4.4375,
"learning_rate": 1.3033762056462654e-05,
"loss": 1.7441,
"step": 1300
},
{
"epoch": 2.024729520865533,
"grad_norm": 3.84375,
"learning_rate": 1.2669035850235536e-05,
"loss": 1.58,
"step": 1310
},
{
"epoch": 2.0401854714064913,
"grad_norm": 4.6875,
"learning_rate": 1.230774559893131e-05,
"loss": 1.5253,
"step": 1320
},
{
"epoch": 2.05564142194745,
"grad_norm": 5.46875,
"learning_rate": 1.1949991974079197e-05,
"loss": 1.5205,
"step": 1330
},
{
"epoch": 2.071097372488408,
"grad_norm": 5.1875,
"learning_rate": 1.1595874661746986e-05,
"loss": 1.6223,
"step": 1340
},
{
"epoch": 2.0865533230293662,
"grad_norm": 4.90625,
"learning_rate": 1.1245492334764169e-05,
"loss": 1.5583,
"step": 1350
},
{
"epoch": 2.1020092735703244,
"grad_norm": 4.90625,
"learning_rate": 1.0898942625227168e-05,
"loss": 1.5896,
"step": 1360
},
{
"epoch": 2.117465224111283,
"grad_norm": 6.78125,
"learning_rate": 1.0556322097294835e-05,
"loss": 1.6192,
"step": 1370
},
{
"epoch": 2.132921174652241,
"grad_norm": 5.0,
"learning_rate": 1.0217726220281243e-05,
"loss": 1.6102,
"step": 1380
},
{
"epoch": 2.1483771251931993,
"grad_norm": 5.8125,
"learning_rate": 9.883249342053821e-06,
"loss": 1.5518,
"step": 1390
},
{
"epoch": 2.1638330757341575,
"grad_norm": 5.6875,
"learning_rate": 9.552984662743716e-06,
"loss": 1.4824,
"step": 1400
},
{
"epoch": 2.179289026275116,
"grad_norm": 5.09375,
"learning_rate": 9.227024208776224e-06,
"loss": 1.4147,
"step": 1410
},
{
"epoch": 2.1947449768160743,
"grad_norm": 5.5,
"learning_rate": 8.905458807228013e-06,
"loss": 1.5841,
"step": 1420
},
{
"epoch": 2.2102009273570324,
"grad_norm": 5.09375,
"learning_rate": 8.588378060518738e-06,
"loss": 1.6099,
"step": 1430
},
{
"epoch": 2.2256568778979906,
"grad_norm": 6.6875,
"learning_rate": 8.275870321443802e-06,
"loss": 1.5302,
"step": 1440
},
{
"epoch": 2.2411128284389488,
"grad_norm": 5.21875,
"learning_rate": 7.968022668555334e-06,
"loss": 1.5991,
"step": 1450
},
{
"epoch": 2.2565687789799074,
"grad_norm": 6.8125,
"learning_rate": 7.664920881898236e-06,
"loss": 1.6388,
"step": 1460
},
{
"epoch": 2.2720247295208655,
"grad_norm": 6.78125,
"learning_rate": 7.3666494191080455e-06,
"loss": 1.5273,
"step": 1470
},
{
"epoch": 2.2874806800618237,
"grad_norm": 5.5625,
"learning_rate": 7.073291391877288e-06,
"loss": 1.502,
"step": 1480
},
{
"epoch": 2.3029366306027823,
"grad_norm": 5.9375,
"learning_rate": 6.7849285427968586e-06,
"loss": 1.4714,
"step": 1490
},
{
"epoch": 2.3183925811437405,
"grad_norm": 6.4375,
"learning_rate": 6.5016412225789e-06,
"loss": 1.4667,
"step": 1500
},
{
"epoch": 2.3338485316846986,
"grad_norm": 6.625,
"learning_rate": 6.223508367667538e-06,
"loss": 1.604,
"step": 1510
},
{
"epoch": 2.349304482225657,
"grad_norm": 5.4375,
"learning_rate": 5.9506074782436275e-06,
"loss": 1.6265,
"step": 1520
},
{
"epoch": 2.364760432766615,
"grad_norm": 5.1875,
"learning_rate": 5.683014596629857e-06,
"loss": 1.4042,
"step": 1530
},
{
"epoch": 2.3802163833075736,
"grad_norm": 4.40625,
"learning_rate": 5.420804286101838e-06,
"loss": 1.6431,
"step": 1540
},
{
"epoch": 2.3956723338485317,
"grad_norm": 4.84375,
"learning_rate": 5.164049610111607e-06,
"loss": 1.6615,
"step": 1550
},
{
"epoch": 2.41112828438949,
"grad_norm": 4.15625,
"learning_rate": 4.912822111928767e-06,
"loss": 1.6007,
"step": 1560
},
{
"epoch": 2.426584234930448,
"grad_norm": 6.8125,
"learning_rate": 4.667191794705444e-06,
"loss": 1.5768,
"step": 1570
},
{
"epoch": 2.4420401854714067,
"grad_norm": 7.09375,
"learning_rate": 4.427227101970261e-06,
"loss": 1.6504,
"step": 1580
},
{
"epoch": 2.457496136012365,
"grad_norm": 4.125,
"learning_rate": 4.192994898556921e-06,
"loss": 1.5617,
"step": 1590
},
{
"epoch": 2.472952086553323,
"grad_norm": 4.9375,
"learning_rate": 3.964560451972705e-06,
"loss": 1.4937,
"step": 1600
},
{
"epoch": 2.488408037094281,
"grad_norm": 4.34375,
"learning_rate": 3.7419874142120363e-06,
"loss": 1.5398,
"step": 1610
},
{
"epoch": 2.5038639876352393,
"grad_norm": 7.125,
"learning_rate": 3.525337804020212e-06,
"loss": 1.6331,
"step": 1620
},
{
"epoch": 2.519319938176198,
"grad_norm": 4.65625,
"learning_rate": 3.314671989612195e-06,
"loss": 1.5092,
"step": 1630
},
{
"epoch": 2.534775888717156,
"grad_norm": 6.0,
"learning_rate": 3.110048671851404e-06,
"loss": 1.6244,
"step": 1640
},
{
"epoch": 2.5502318392581143,
"grad_norm": 5.875,
"learning_rate": 2.911524867892956e-06,
"loss": 1.5014,
"step": 1650
},
{
"epoch": 2.565687789799073,
"grad_norm": 5.71875,
"learning_rate": 2.719155895296255e-06,
"loss": 1.5084,
"step": 1660
},
{
"epoch": 2.581143740340031,
"grad_norm": 5.40625,
"learning_rate": 2.5329953566109537e-06,
"loss": 1.6218,
"step": 1670
},
{
"epoch": 2.596599690880989,
"grad_norm": 6.28125,
"learning_rate": 2.353095124440999e-06,
"loss": 1.5937,
"step": 1680
},
{
"epoch": 2.6120556414219473,
"grad_norm": 5.78125,
"learning_rate": 2.17950532699053e-06,
"loss": 1.5456,
"step": 1690
},
{
"epoch": 2.6275115919629055,
"grad_norm": 4.15625,
"learning_rate": 2.012274334095998e-06,
"loss": 1.6362,
"step": 1700
},
{
"epoch": 2.642967542503864,
"grad_norm": 5.28125,
"learning_rate": 1.8514487437481436e-06,
"loss": 1.5103,
"step": 1710
},
{
"epoch": 2.6584234930448223,
"grad_norm": 5.53125,
"learning_rate": 1.6970733691077378e-06,
"loss": 1.5271,
"step": 1720
},
{
"epoch": 2.6738794435857804,
"grad_norm": 5.34375,
"learning_rate": 1.549191226018637e-06,
"loss": 1.5694,
"step": 1730
},
{
"epoch": 2.689335394126739,
"grad_norm": 5.21875,
"learning_rate": 1.4078435210216568e-06,
"loss": 1.564,
"step": 1740
},
{
"epoch": 2.704791344667697,
"grad_norm": 4.8125,
"learning_rate": 1.273069639872601e-06,
"loss": 1.6323,
"step": 1750
},
{
"epoch": 2.7202472952086554,
"grad_norm": 4.75,
"learning_rate": 1.1449071365676356e-06,
"loss": 1.6231,
"step": 1760
},
{
"epoch": 2.7357032457496135,
"grad_norm": 6.125,
"learning_rate": 1.0233917228790828e-06,
"loss": 1.5671,
"step": 1770
},
{
"epoch": 2.7511591962905717,
"grad_norm": 5.53125,
"learning_rate": 9.08557258404516e-07,
"loss": 1.5081,
"step": 1780
},
{
"epoch": 2.76661514683153,
"grad_norm": 4.9375,
"learning_rate": 8.004357411319857e-07,
"loss": 1.5764,
"step": 1790
},
{
"epoch": 2.7820710973724885,
"grad_norm": 6.46875,
"learning_rate": 6.990572985239612e-07,
"loss": 1.5529,
"step": 1800
},
{
"epoch": 2.7975270479134466,
"grad_norm": 5.6875,
"learning_rate": 6.044501791224539e-07,
"loss": 1.5893,
"step": 1810
},
{
"epoch": 2.812982998454405,
"grad_norm": 7.125,
"learning_rate": 5.166407446777572e-07,
"loss": 1.6085,
"step": 1820
},
{
"epoch": 2.8284389489953634,
"grad_norm": 5.25,
"learning_rate": 4.3565346280284305e-07,
"loss": 1.4917,
"step": 1830
},
{
"epoch": 2.8438948995363216,
"grad_norm": 5.3125,
"learning_rate": 3.6151090015565103e-07,
"loss": 1.5961,
"step": 1840
},
{
"epoch": 2.8593508500772797,
"grad_norm": 5.0,
"learning_rate": 2.9423371615096005e-07,
"loss": 1.621,
"step": 1850
},
{
"epoch": 2.874806800618238,
"grad_norm": 4.375,
"learning_rate": 2.3384065720379878e-07,
"loss": 1.5848,
"step": 1860
},
{
"epoch": 2.890262751159196,
"grad_norm": 4.90625,
"learning_rate": 1.803485515058323e-07,
"loss": 1.47,
"step": 1870
},
{
"epoch": 2.9057187017001547,
"grad_norm": 7.21875,
"learning_rate": 1.3377230433630205e-07,
"loss": 1.5546,
"step": 1880
},
{
"epoch": 2.921174652241113,
"grad_norm": 5.6875,
"learning_rate": 9.412489390873414e-08,
"loss": 1.6426,
"step": 1890
},
{
"epoch": 2.936630602782071,
"grad_norm": 5.78125,
"learning_rate": 6.141736775464313e-08,
"loss": 1.5036,
"step": 1900
},
{
"epoch": 2.9520865533230296,
"grad_norm": 7.125,
"learning_rate": 3.5658839645194565e-08,
"loss": 1.5698,
"step": 1910
},
{
"epoch": 2.9675425038639878,
"grad_norm": 5.75,
"learning_rate": 1.6856487051700177e-08,
"loss": 1.5624,
"step": 1920
},
{
"epoch": 2.982998454404946,
"grad_norm": 5.6875,
"learning_rate": 5.015549145664933e-09,
"loss": 1.7195,
"step": 1930
},
{
"epoch": 2.998454404945904,
"grad_norm": 6.15625,
"learning_rate": 1.393253389103677e-10,
"loss": 1.6643,
"step": 1940
},
{
"epoch": 3.0,
"step": 1941,
"total_flos": 7.627263225499238e+16,
"train_loss": 2.1592409425884838,
"train_runtime": 1942.4867,
"train_samples_per_second": 15.985,
"train_steps_per_second": 0.999
}
],
"logging_steps": 10,
"max_steps": 1941,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.627263225499238e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}