{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 215,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023255813953488372,
"grad_norm": 5.434192361984504,
"learning_rate": 3.6363636363636366e-06,
"loss": 0.9705,
"step": 1
},
{
"epoch": 0.046511627906976744,
"grad_norm": 5.477136338528377,
"learning_rate": 7.272727272727273e-06,
"loss": 0.9693,
"step": 2
},
{
"epoch": 0.06976744186046512,
"grad_norm": 4.994172293091364,
"learning_rate": 1.0909090909090909e-05,
"loss": 0.971,
"step": 3
},
{
"epoch": 0.09302325581395349,
"grad_norm": 1.9715768923169905,
"learning_rate": 1.4545454545454546e-05,
"loss": 0.882,
"step": 4
},
{
"epoch": 0.11627906976744186,
"grad_norm": 4.86558025165536,
"learning_rate": 1.8181818181818182e-05,
"loss": 0.9089,
"step": 5
},
{
"epoch": 0.13953488372093023,
"grad_norm": 6.162689558609649,
"learning_rate": 2.1818181818181818e-05,
"loss": 0.9031,
"step": 6
},
{
"epoch": 0.16279069767441862,
"grad_norm": 5.545748731779107,
"learning_rate": 2.5454545454545457e-05,
"loss": 0.9058,
"step": 7
},
{
"epoch": 0.18604651162790697,
"grad_norm": 3.4785773417590877,
"learning_rate": 2.9090909090909093e-05,
"loss": 0.8423,
"step": 8
},
{
"epoch": 0.20930232558139536,
"grad_norm": 2.16515163827823,
"learning_rate": 3.272727272727273e-05,
"loss": 0.8043,
"step": 9
},
{
"epoch": 0.23255813953488372,
"grad_norm": 2.664754416131028,
"learning_rate": 3.6363636363636364e-05,
"loss": 0.7799,
"step": 10
},
{
"epoch": 0.2558139534883721,
"grad_norm": 1.4902721906661134,
"learning_rate": 4e-05,
"loss": 0.7546,
"step": 11
},
{
"epoch": 0.27906976744186046,
"grad_norm": 2.015625441498336,
"learning_rate": 4.3636363636363636e-05,
"loss": 0.7408,
"step": 12
},
{
"epoch": 0.3023255813953488,
"grad_norm": 1.1406136624119514,
"learning_rate": 4.727272727272728e-05,
"loss": 0.7216,
"step": 13
},
{
"epoch": 0.32558139534883723,
"grad_norm": 1.8195500867443861,
"learning_rate": 5.0909090909090914e-05,
"loss": 0.724,
"step": 14
},
{
"epoch": 0.3488372093023256,
"grad_norm": 1.595476578575812,
"learning_rate": 5.4545454545454546e-05,
"loss": 0.7084,
"step": 15
},
{
"epoch": 0.37209302325581395,
"grad_norm": 1.9850762532377662,
"learning_rate": 5.8181818181818185e-05,
"loss": 0.6992,
"step": 16
},
{
"epoch": 0.3953488372093023,
"grad_norm": 1.3156392269832544,
"learning_rate": 6.181818181818182e-05,
"loss": 0.6969,
"step": 17
},
{
"epoch": 0.4186046511627907,
"grad_norm": 2.165349958578573,
"learning_rate": 6.545454545454546e-05,
"loss": 0.6819,
"step": 18
},
{
"epoch": 0.4418604651162791,
"grad_norm": 1.860669140085515,
"learning_rate": 6.90909090909091e-05,
"loss": 0.6986,
"step": 19
},
{
"epoch": 0.46511627906976744,
"grad_norm": 1.7217756323779048,
"learning_rate": 7.272727272727273e-05,
"loss": 0.6801,
"step": 20
},
{
"epoch": 0.4883720930232558,
"grad_norm": 1.4025893684782413,
"learning_rate": 7.636363636363637e-05,
"loss": 0.6751,
"step": 21
},
{
"epoch": 0.5116279069767442,
"grad_norm": 1.7449603840187862,
"learning_rate": 8e-05,
"loss": 0.673,
"step": 22
},
{
"epoch": 0.5348837209302325,
"grad_norm": 2.482316452405395,
"learning_rate": 7.999470085828896e-05,
"loss": 0.6629,
"step": 23
},
{
"epoch": 0.5581395348837209,
"grad_norm": 1.2207804383595053,
"learning_rate": 7.997880483720098e-05,
"loss": 0.6716,
"step": 24
},
{
"epoch": 0.5813953488372093,
"grad_norm": 2.568593544907844,
"learning_rate": 7.995231614849947e-05,
"loss": 0.671,
"step": 25
},
{
"epoch": 0.6046511627906976,
"grad_norm": 1.502461502116564,
"learning_rate": 7.991524181055018e-05,
"loss": 0.6608,
"step": 26
},
{
"epoch": 0.627906976744186,
"grad_norm": 2.8212929800172915,
"learning_rate": 7.986759164646167e-05,
"loss": 0.673,
"step": 27
},
{
"epoch": 0.6511627906976745,
"grad_norm": 2.5422318457006825,
"learning_rate": 7.980937828148252e-05,
"loss": 0.6916,
"step": 28
},
{
"epoch": 0.6744186046511628,
"grad_norm": 1.6426727549059879,
"learning_rate": 7.974061713965625e-05,
"loss": 0.6487,
"step": 29
},
{
"epoch": 0.6976744186046512,
"grad_norm": 1.8002811870915256,
"learning_rate": 7.966132643973464e-05,
"loss": 0.6429,
"step": 30
},
{
"epoch": 0.7209302325581395,
"grad_norm": 1.4654311466330405,
"learning_rate": 7.95715271903504e-05,
"loss": 0.6398,
"step": 31
},
{
"epoch": 0.7441860465116279,
"grad_norm": 1.329866741625803,
"learning_rate": 7.947124318445097e-05,
"loss": 0.6398,
"step": 32
},
{
"epoch": 0.7674418604651163,
"grad_norm": 1.4731638464740446,
"learning_rate": 7.936050099299425e-05,
"loss": 0.6243,
"step": 33
},
{
"epoch": 0.7906976744186046,
"grad_norm": 1.4894523263564694,
"learning_rate": 7.923932995790858e-05,
"loss": 0.6335,
"step": 34
},
{
"epoch": 0.813953488372093,
"grad_norm": 1.173972016468102,
"learning_rate": 7.910776218431821e-05,
"loss": 0.6224,
"step": 35
},
{
"epoch": 0.8372093023255814,
"grad_norm": 1.6637513739112912,
"learning_rate": 7.896583253203703e-05,
"loss": 0.6219,
"step": 36
},
{
"epoch": 0.8604651162790697,
"grad_norm": 1.122406312089788,
"learning_rate": 7.881357860633204e-05,
"loss": 0.6232,
"step": 37
},
{
"epoch": 0.8837209302325582,
"grad_norm": 1.4388855947402022,
"learning_rate": 7.865104074795966e-05,
"loss": 0.5977,
"step": 38
},
{
"epoch": 0.9069767441860465,
"grad_norm": 1.4139997348656486,
"learning_rate": 7.847826202247715e-05,
"loss": 0.6193,
"step": 39
},
{
"epoch": 0.9302325581395349,
"grad_norm": 1.4041482845121371,
"learning_rate": 7.829528820883205e-05,
"loss": 0.604,
"step": 40
},
{
"epoch": 0.9534883720930233,
"grad_norm": 1.1795408353423182,
"learning_rate": 7.810216778723275e-05,
"loss": 0.5909,
"step": 41
},
{
"epoch": 0.9767441860465116,
"grad_norm": 1.0271878945240036,
"learning_rate": 7.789895192630333e-05,
"loss": 0.5927,
"step": 42
},
{
"epoch": 1.0,
"grad_norm": 1.7552637527608157,
"learning_rate": 7.768569446952603e-05,
"loss": 0.5843,
"step": 43
},
{
"epoch": 1.0232558139534884,
"grad_norm": 0.9212131281614243,
"learning_rate": 7.746245192097509e-05,
"loss": 0.5722,
"step": 44
},
{
"epoch": 1.0465116279069768,
"grad_norm": 1.6756296752481579,
"learning_rate": 7.722928343034551e-05,
"loss": 0.5773,
"step": 45
},
{
"epoch": 1.069767441860465,
"grad_norm": 1.3543712007327793,
"learning_rate": 7.698625077728103e-05,
"loss": 0.5672,
"step": 46
},
{
"epoch": 1.0930232558139534,
"grad_norm": 1.7410081511463789,
"learning_rate": 7.67334183550051e-05,
"loss": 0.5674,
"step": 47
},
{
"epoch": 1.1162790697674418,
"grad_norm": 1.0733210547826335,
"learning_rate": 7.647085315325946e-05,
"loss": 0.5671,
"step": 48
},
{
"epoch": 1.1395348837209303,
"grad_norm": 2.252268000471556,
"learning_rate": 7.619862474055472e-05,
"loss": 0.5638,
"step": 49
},
{
"epoch": 1.1627906976744187,
"grad_norm": 1.6138509974039352,
"learning_rate": 7.591680524573773e-05,
"loss": 0.5625,
"step": 50
},
{
"epoch": 1.1860465116279069,
"grad_norm": 2.0237798746000193,
"learning_rate": 7.56254693388805e-05,
"loss": 0.5592,
"step": 51
},
{
"epoch": 1.2093023255813953,
"grad_norm": 1.4811712482469028,
"learning_rate": 7.532469421149578e-05,
"loss": 0.5599,
"step": 52
},
{
"epoch": 1.2325581395348837,
"grad_norm": 1.5279703879919382,
"learning_rate": 7.501455955608479e-05,
"loss": 0.5559,
"step": 53
},
{
"epoch": 1.255813953488372,
"grad_norm": 1.2468524365094296,
"learning_rate": 7.469514754502191e-05,
"loss": 0.5549,
"step": 54
},
{
"epoch": 1.2790697674418605,
"grad_norm": 1.2825998428965273,
"learning_rate": 7.436654280878269e-05,
"loss": 0.546,
"step": 55
},
{
"epoch": 1.302325581395349,
"grad_norm": 0.9710691709326068,
"learning_rate": 7.402883241352037e-05,
"loss": 0.5492,
"step": 56
},
{
"epoch": 1.3255813953488373,
"grad_norm": 1.4012298372305019,
"learning_rate": 7.368210583799701e-05,
"loss": 0.5481,
"step": 57
},
{
"epoch": 1.3488372093023255,
"grad_norm": 1.2045446990194086,
"learning_rate": 7.332645494987555e-05,
"loss": 0.544,
"step": 58
},
{
"epoch": 1.372093023255814,
"grad_norm": 1.5478512274120089,
"learning_rate": 7.296197398137878e-05,
"loss": 0.5399,
"step": 59
},
{
"epoch": 1.3953488372093024,
"grad_norm": 1.0482231123455121,
"learning_rate": 7.258875950432185e-05,
"loss": 0.5457,
"step": 60
},
{
"epoch": 1.4186046511627908,
"grad_norm": 1.350025832651234,
"learning_rate": 7.220691040452491e-05,
"loss": 0.533,
"step": 61
},
{
"epoch": 1.441860465116279,
"grad_norm": 1.6379778302822026,
"learning_rate": 7.181652785561254e-05,
"loss": 0.5467,
"step": 62
},
{
"epoch": 1.4651162790697674,
"grad_norm": 0.7697335505845303,
"learning_rate": 7.141771529220716e-05,
"loss": 0.536,
"step": 63
},
{
"epoch": 1.4883720930232558,
"grad_norm": 1.019518212307104,
"learning_rate": 7.101057838252324e-05,
"loss": 0.5309,
"step": 64
},
{
"epoch": 1.5116279069767442,
"grad_norm": 1.1970177274801466,
"learning_rate": 7.05952250003698e-05,
"loss": 0.535,
"step": 65
},
{
"epoch": 1.5348837209302326,
"grad_norm": 1.0203220373942377,
"learning_rate": 7.017176519656845e-05,
"loss": 0.5378,
"step": 66
},
{
"epoch": 1.558139534883721,
"grad_norm": 1.0958072090841686,
"learning_rate": 6.974031116979466e-05,
"loss": 0.5407,
"step": 67
},
{
"epoch": 1.5813953488372094,
"grad_norm": 1.3397310047597761,
"learning_rate": 6.93009772368499e-05,
"loss": 0.5375,
"step": 68
},
{
"epoch": 1.6046511627906976,
"grad_norm": 0.8782131687864965,
"learning_rate": 6.885387980237262e-05,
"loss": 0.5251,
"step": 69
},
{
"epoch": 1.627906976744186,
"grad_norm": 1.4357177318604477,
"learning_rate": 6.839913732799604e-05,
"loss": 0.5274,
"step": 70
},
{
"epoch": 1.6511627906976745,
"grad_norm": 1.215047188865305,
"learning_rate": 6.793687030096085e-05,
"loss": 0.5295,
"step": 71
},
{
"epoch": 1.6744186046511627,
"grad_norm": 1.2710521920417013,
"learning_rate": 6.746720120219126e-05,
"loss": 0.5421,
"step": 72
},
{
"epoch": 1.697674418604651,
"grad_norm": 0.7813066526911904,
"learning_rate": 6.699025447384284e-05,
"loss": 0.5312,
"step": 73
},
{
"epoch": 1.7209302325581395,
"grad_norm": 1.2496179982961972,
"learning_rate": 6.650615648633075e-05,
"loss": 0.5246,
"step": 74
},
{
"epoch": 1.744186046511628,
"grad_norm": 1.311820641764223,
"learning_rate": 6.601503550484684e-05,
"loss": 0.5258,
"step": 75
},
{
"epoch": 1.7674418604651163,
"grad_norm": 1.0893635673008315,
"learning_rate": 6.5517021655375e-05,
"loss": 0.5271,
"step": 76
},
{
"epoch": 1.7906976744186047,
"grad_norm": 0.7546199150125754,
"learning_rate": 6.501224689021341e-05,
"loss": 0.5185,
"step": 77
},
{
"epoch": 1.8139534883720931,
"grad_norm": 1.1638269494753801,
"learning_rate": 6.450084495301268e-05,
"loss": 0.5169,
"step": 78
},
{
"epoch": 1.8372093023255816,
"grad_norm": 1.1455758960103528,
"learning_rate": 6.398295134333962e-05,
"loss": 0.5197,
"step": 79
},
{
"epoch": 1.8604651162790697,
"grad_norm": 0.9787313133874568,
"learning_rate": 6.345870328077569e-05,
"loss": 0.5189,
"step": 80
},
{
"epoch": 1.8837209302325582,
"grad_norm": 1.2146657230893994,
"learning_rate": 6.292823966855965e-05,
"loss": 0.5184,
"step": 81
},
{
"epoch": 1.9069767441860463,
"grad_norm": 0.8315295395061865,
"learning_rate": 6.239170105678419e-05,
"loss": 0.5151,
"step": 82
},
{
"epoch": 1.9302325581395348,
"grad_norm": 0.43739239970652544,
"learning_rate": 6.184922960515616e-05,
"loss": 0.5088,
"step": 83
},
{
"epoch": 1.9534883720930232,
"grad_norm": 0.8529923746757689,
"learning_rate": 6.13009690453304e-05,
"loss": 0.5212,
"step": 84
},
{
"epoch": 1.9767441860465116,
"grad_norm": 0.7915534358179345,
"learning_rate": 6.074706464282695e-05,
"loss": 0.5081,
"step": 85
},
{
"epoch": 2.0,
"grad_norm": 0.838942938146,
"learning_rate": 6.018766315854196e-05,
"loss": 0.4843,
"step": 86
},
{
"epoch": 2.0232558139534884,
"grad_norm": 1.0664271997362778,
"learning_rate": 5.9622912809862384e-05,
"loss": 0.4787,
"step": 87
},
{
"epoch": 2.046511627906977,
"grad_norm": 1.4645846831681002,
"learning_rate": 5.905296323139467e-05,
"loss": 0.4779,
"step": 88
},
{
"epoch": 2.0697674418604652,
"grad_norm": 1.0222504468766858,
"learning_rate": 5.847796543531801e-05,
"loss": 0.4708,
"step": 89
},
{
"epoch": 2.0930232558139537,
"grad_norm": 1.446291157107826,
"learning_rate": 5.789807177137269e-05,
"loss": 0.4747,
"step": 90
},
{
"epoch": 2.116279069767442,
"grad_norm": 0.894564054089064,
"learning_rate": 5.731343588649382e-05,
"loss": 0.4707,
"step": 91
},
{
"epoch": 2.13953488372093,
"grad_norm": 1.1812623112632408,
"learning_rate": 5.6724212684101567e-05,
"loss": 0.4724,
"step": 92
},
{
"epoch": 2.1627906976744184,
"grad_norm": 1.1141414953554294,
"learning_rate": 5.613055828305838e-05,
"loss": 0.4659,
"step": 93
},
{
"epoch": 2.186046511627907,
"grad_norm": 0.7770582052783432,
"learning_rate": 5.553262997630419e-05,
"loss": 0.4592,
"step": 94
},
{
"epoch": 2.2093023255813953,
"grad_norm": 1.0201596899389407,
"learning_rate": 5.493058618918051e-05,
"loss": 0.464,
"step": 95
},
{
"epoch": 2.2325581395348837,
"grad_norm": 0.99732039808284,
"learning_rate": 5.432458643745456e-05,
"loss": 0.4728,
"step": 96
},
{
"epoch": 2.255813953488372,
"grad_norm": 0.9946765474703833,
"learning_rate": 5.37147912850544e-05,
"loss": 0.4719,
"step": 97
},
{
"epoch": 2.2790697674418605,
"grad_norm": 0.67376712726853,
"learning_rate": 5.31013623015264e-05,
"loss": 0.4635,
"step": 98
},
{
"epoch": 2.302325581395349,
"grad_norm": 0.5331380825906256,
"learning_rate": 5.2484462019226216e-05,
"loss": 0.4636,
"step": 99
},
{
"epoch": 2.3255813953488373,
"grad_norm": 0.8114012150052442,
"learning_rate": 5.1864253890254744e-05,
"loss": 0.4669,
"step": 100
},
{
"epoch": 2.3488372093023258,
"grad_norm": 0.7817511193460437,
"learning_rate": 5.1240902243150245e-05,
"loss": 0.4643,
"step": 101
},
{
"epoch": 2.3720930232558137,
"grad_norm": 0.5742798063922762,
"learning_rate": 5.0614572239348436e-05,
"loss": 0.468,
"step": 102
},
{
"epoch": 2.395348837209302,
"grad_norm": 0.7545379109683643,
"learning_rate": 4.998542982942169e-05,
"loss": 0.4646,
"step": 103
},
{
"epoch": 2.4186046511627906,
"grad_norm": 0.6867300007616275,
"learning_rate": 4.9353641709109366e-05,
"loss": 0.466,
"step": 104
},
{
"epoch": 2.441860465116279,
"grad_norm": 0.5994645048448306,
"learning_rate": 4.87193752751505e-05,
"loss": 0.4605,
"step": 105
},
{
"epoch": 2.4651162790697674,
"grad_norm": 0.5648107826549134,
"learning_rate": 4.808279858093088e-05,
"loss": 0.4633,
"step": 106
},
{
"epoch": 2.488372093023256,
"grad_norm": 0.41656800662182036,
"learning_rate": 4.744408029195616e-05,
"loss": 0.458,
"step": 107
},
{
"epoch": 2.511627906976744,
"grad_norm": 0.560471313591134,
"learning_rate": 4.680338964116267e-05,
"loss": 0.4694,
"step": 108
},
{
"epoch": 2.5348837209302326,
"grad_norm": 0.3737750854348531,
"learning_rate": 4.616089638407798e-05,
"loss": 0.4572,
"step": 109
},
{
"epoch": 2.558139534883721,
"grad_norm": 0.4130815840573508,
"learning_rate": 4.551677075384296e-05,
"loss": 0.4642,
"step": 110
},
{
"epoch": 2.5813953488372094,
"grad_norm": 0.45613348607122495,
"learning_rate": 4.487118341610736e-05,
"loss": 0.4558,
"step": 111
},
{
"epoch": 2.604651162790698,
"grad_norm": 0.3913887000750076,
"learning_rate": 4.422430542381064e-05,
"loss": 0.4646,
"step": 112
},
{
"epoch": 2.6279069767441863,
"grad_norm": 0.30374911049832554,
"learning_rate": 4.3576308171860336e-05,
"loss": 0.4624,
"step": 113
},
{
"epoch": 2.6511627906976747,
"grad_norm": 0.3917025778134578,
"learning_rate": 4.292736335171978e-05,
"loss": 0.4566,
"step": 114
},
{
"epoch": 2.6744186046511627,
"grad_norm": 0.3717043272468876,
"learning_rate": 4.2277642905917205e-05,
"loss": 0.4566,
"step": 115
},
{
"epoch": 2.697674418604651,
"grad_norm": 0.30603425852066757,
"learning_rate": 4.162731898248834e-05,
"loss": 0.4599,
"step": 116
},
{
"epoch": 2.7209302325581395,
"grad_norm": 0.3251855900379686,
"learning_rate": 4.097656388936462e-05,
"loss": 0.4623,
"step": 117
},
{
"epoch": 2.744186046511628,
"grad_norm": 0.33442925355720576,
"learning_rate": 4.032555004871892e-05,
"loss": 0.4587,
"step": 118
},
{
"epoch": 2.7674418604651163,
"grad_norm": 0.33761110069481104,
"learning_rate": 3.9674449951281095e-05,
"loss": 0.4576,
"step": 119
},
{
"epoch": 2.7906976744186047,
"grad_norm": 0.26743941116764636,
"learning_rate": 3.902343611063539e-05,
"loss": 0.4556,
"step": 120
},
{
"epoch": 2.813953488372093,
"grad_norm": 0.2128179360983353,
"learning_rate": 3.837268101751166e-05,
"loss": 0.4558,
"step": 121
},
{
"epoch": 2.8372093023255816,
"grad_norm": 0.2896189154702043,
"learning_rate": 3.7722357094082815e-05,
"loss": 0.4618,
"step": 122
},
{
"epoch": 2.8604651162790695,
"grad_norm": 0.21296934932760417,
"learning_rate": 3.707263664828023e-05,
"loss": 0.4591,
"step": 123
},
{
"epoch": 2.883720930232558,
"grad_norm": 0.2791061519264039,
"learning_rate": 3.6423691828139685e-05,
"loss": 0.4524,
"step": 124
},
{
"epoch": 2.9069767441860463,
"grad_norm": 0.2631221143960164,
"learning_rate": 3.577569457618937e-05,
"loss": 0.4606,
"step": 125
},
{
"epoch": 2.9302325581395348,
"grad_norm": 0.20684807194617985,
"learning_rate": 3.512881658389265e-05,
"loss": 0.4578,
"step": 126
},
{
"epoch": 2.953488372093023,
"grad_norm": 0.2697328366910901,
"learning_rate": 3.448322924615703e-05,
"loss": 0.4544,
"step": 127
},
{
"epoch": 2.9767441860465116,
"grad_norm": 0.22910217651885814,
"learning_rate": 3.383910361592204e-05,
"loss": 0.458,
"step": 128
},
{
"epoch": 3.0,
"grad_norm": 0.23244831316179806,
"learning_rate": 3.319661035883734e-05,
"loss": 0.427,
"step": 129
},
{
"epoch": 3.0232558139534884,
"grad_norm": 0.20986791210333858,
"learning_rate": 3.2555919708043856e-05,
"loss": 0.4232,
"step": 130
},
{
"epoch": 3.046511627906977,
"grad_norm": 0.2570519418980638,
"learning_rate": 3.1917201419069126e-05,
"loss": 0.4149,
"step": 131
},
{
"epoch": 3.0697674418604652,
"grad_norm": 0.24795179030963221,
"learning_rate": 3.128062472484952e-05,
"loss": 0.4193,
"step": 132
},
{
"epoch": 3.0930232558139537,
"grad_norm": 0.2576172200398066,
"learning_rate": 3.064635829089064e-05,
"loss": 0.4113,
"step": 133
},
{
"epoch": 3.116279069767442,
"grad_norm": 0.22687653576900754,
"learning_rate": 3.0014570170578324e-05,
"loss": 0.417,
"step": 134
},
{
"epoch": 3.13953488372093,
"grad_norm": 0.26047808565271,
"learning_rate": 2.938542776065158e-05,
"loss": 0.4137,
"step": 135
},
{
"epoch": 3.1627906976744184,
"grad_norm": 0.22319212273415287,
"learning_rate": 2.875909775684977e-05,
"loss": 0.4211,
"step": 136
},
{
"epoch": 3.186046511627907,
"grad_norm": 0.22428976031004036,
"learning_rate": 2.8135746109745273e-05,
"loss": 0.4138,
"step": 137
},
{
"epoch": 3.2093023255813953,
"grad_norm": 0.22247453156579614,
"learning_rate": 2.751553798077379e-05,
"loss": 0.4153,
"step": 138
},
{
"epoch": 3.2325581395348837,
"grad_norm": 0.20007992804768981,
"learning_rate": 2.6898637698473605e-05,
"loss": 0.4135,
"step": 139
},
{
"epoch": 3.255813953488372,
"grad_norm": 0.2451508001851349,
"learning_rate": 2.6285208714945615e-05,
"loss": 0.407,
"step": 140
},
{
"epoch": 3.2790697674418605,
"grad_norm": 0.18902377319628208,
"learning_rate": 2.5675413562545453e-05,
"loss": 0.4044,
"step": 141
},
{
"epoch": 3.302325581395349,
"grad_norm": 0.24194497338579668,
"learning_rate": 2.5069413810819503e-05,
"loss": 0.4073,
"step": 142
},
{
"epoch": 3.3255813953488373,
"grad_norm": 0.1989749287423866,
"learning_rate": 2.446737002369582e-05,
"loss": 0.4169,
"step": 143
},
{
"epoch": 3.3488372093023258,
"grad_norm": 0.2095582088688536,
"learning_rate": 2.3869441716941627e-05,
"loss": 0.4204,
"step": 144
},
{
"epoch": 3.3720930232558137,
"grad_norm": 0.162353572736833,
"learning_rate": 2.327578731589843e-05,
"loss": 0.4065,
"step": 145
},
{
"epoch": 3.395348837209302,
"grad_norm": 0.17279337899190159,
"learning_rate": 2.2686564113506188e-05,
"loss": 0.412,
"step": 146
},
{
"epoch": 3.4186046511627906,
"grad_norm": 0.16979965281268794,
"learning_rate": 2.210192822862733e-05,
"loss": 0.4097,
"step": 147
},
{
"epoch": 3.441860465116279,
"grad_norm": 0.1394513868982289,
"learning_rate": 2.1522034564681997e-05,
"loss": 0.4136,
"step": 148
},
{
"epoch": 3.4651162790697674,
"grad_norm": 0.15491336187962532,
"learning_rate": 2.0947036768605343e-05,
"loss": 0.4138,
"step": 149
},
{
"epoch": 3.488372093023256,
"grad_norm": 0.15331640718626022,
"learning_rate": 2.0377087190137615e-05,
"loss": 0.4121,
"step": 150
},
{
"epoch": 3.511627906976744,
"grad_norm": 0.14333365303471923,
"learning_rate": 1.9812336841458032e-05,
"loss": 0.412,
"step": 151
},
{
"epoch": 3.5348837209302326,
"grad_norm": 0.15342863178771485,
"learning_rate": 1.925293535717306e-05,
"loss": 0.4128,
"step": 152
},
{
"epoch": 3.558139534883721,
"grad_norm": 0.1515728462605629,
"learning_rate": 1.869903095466962e-05,
"loss": 0.4092,
"step": 153
},
{
"epoch": 3.5813953488372094,
"grad_norm": 0.16216900903578507,
"learning_rate": 1.815077039484385e-05,
"loss": 0.418,
"step": 154
},
{
"epoch": 3.604651162790698,
"grad_norm": 0.14923319773401145,
"learning_rate": 1.760829894321582e-05,
"loss": 0.4124,
"step": 155
},
{
"epoch": 3.6279069767441863,
"grad_norm": 0.1388357623921274,
"learning_rate": 1.7071760331440352e-05,
"loss": 0.4134,
"step": 156
},
{
"epoch": 3.6511627906976747,
"grad_norm": 0.14463436978493738,
"learning_rate": 1.6541296719224315e-05,
"loss": 0.4152,
"step": 157
},
{
"epoch": 3.6744186046511627,
"grad_norm": 0.13948667157444467,
"learning_rate": 1.6017048656660392e-05,
"loss": 0.4127,
"step": 158
},
{
"epoch": 3.697674418604651,
"grad_norm": 0.12962305417153203,
"learning_rate": 1.5499155046987343e-05,
"loss": 0.4102,
"step": 159
},
{
"epoch": 3.7209302325581395,
"grad_norm": 0.13488967836960994,
"learning_rate": 1.49877531097866e-05,
"loss": 0.412,
"step": 160
},
{
"epoch": 3.744186046511628,
"grad_norm": 0.14161978366263686,
"learning_rate": 1.4482978344624999e-05,
"loss": 0.4112,
"step": 161
},
{
"epoch": 3.7674418604651163,
"grad_norm": 0.12104463455571417,
"learning_rate": 1.3984964495153178e-05,
"loss": 0.4107,
"step": 162
},
{
"epoch": 3.7906976744186047,
"grad_norm": 0.14778280010357478,
"learning_rate": 1.349384351366926e-05,
"loss": 0.4103,
"step": 163
},
{
"epoch": 3.813953488372093,
"grad_norm": 0.13974610415305352,
"learning_rate": 1.3009745526157165e-05,
"loss": 0.4089,
"step": 164
},
{
"epoch": 3.8372093023255816,
"grad_norm": 0.12567459622985308,
"learning_rate": 1.2532798797808767e-05,
"loss": 0.4082,
"step": 165
},
{
"epoch": 3.8604651162790695,
"grad_norm": 0.13111313669138286,
"learning_rate": 1.2063129699039169e-05,
"loss": 0.4072,
"step": 166
},
{
"epoch": 3.883720930232558,
"grad_norm": 0.11859583351508307,
"learning_rate": 1.1600862672003964e-05,
"loss": 0.408,
"step": 167
},
{
"epoch": 3.9069767441860463,
"grad_norm": 0.12426128460062202,
"learning_rate": 1.1146120197627375e-05,
"loss": 0.41,
"step": 168
},
{
"epoch": 3.9302325581395348,
"grad_norm": 0.12183542106771499,
"learning_rate": 1.0699022763150104e-05,
"loss": 0.4115,
"step": 169
},
{
"epoch": 3.953488372093023,
"grad_norm": 0.1104533940594946,
"learning_rate": 1.0259688830205348e-05,
"loss": 0.4055,
"step": 170
},
{
"epoch": 3.9767441860465116,
"grad_norm": 0.11121249489433677,
"learning_rate": 9.828234803431559e-06,
"loss": 0.411,
"step": 171
},
{
"epoch": 4.0,
"grad_norm": 0.15736411698947347,
"learning_rate": 9.404774999630204e-06,
"loss": 0.3931,
"step": 172
},
{
"epoch": 4.023255813953488,
"grad_norm": 0.14076644290125107,
"learning_rate": 8.989421617476766e-06,
"loss": 0.3903,
"step": 173
},
{
"epoch": 4.046511627906977,
"grad_norm": 0.11712681895575112,
"learning_rate": 8.582284707792845e-06,
"loss": 0.387,
"step": 174
},
{
"epoch": 4.069767441860465,
"grad_norm": 0.12595405435756382,
"learning_rate": 8.183472144387456e-06,
"loss": 0.3866,
"step": 175
},
{
"epoch": 4.093023255813954,
"grad_norm": 0.13565427155194576,
"learning_rate": 7.793089595475094e-06,
"loss": 0.3913,
"step": 176
},
{
"epoch": 4.116279069767442,
"grad_norm": 0.1412644950291927,
"learning_rate": 7.411240495678158e-06,
"loss": 0.3876,
"step": 177
},
{
"epoch": 4.1395348837209305,
"grad_norm": 0.13949397861852225,
"learning_rate": 7.038026018621234e-06,
"loss": 0.3839,
"step": 178
},
{
"epoch": 4.162790697674419,
"grad_norm": 0.12339126854002234,
"learning_rate": 6.673545050124466e-06,
"loss": 0.3894,
"step": 179
},
{
"epoch": 4.186046511627907,
"grad_norm": 0.12932008059607125,
"learning_rate": 6.317894162003004e-06,
"loss": 0.3863,
"step": 180
},
{
"epoch": 4.209302325581396,
"grad_norm": 0.12611084979552667,
"learning_rate": 5.971167586479638e-06,
"loss": 0.3878,
"step": 181
},
{
"epoch": 4.232558139534884,
"grad_norm": 0.12145402717998689,
"learning_rate": 5.633457191217311e-06,
"loss": 0.3806,
"step": 182
},
{
"epoch": 4.2558139534883725,
"grad_norm": 0.12362886923322695,
"learning_rate": 5.3048524549781066e-06,
"loss": 0.3833,
"step": 183
},
{
"epoch": 4.27906976744186,
"grad_norm": 0.1114983032825903,
"learning_rate": 4.985440443915223e-06,
"loss": 0.3873,
"step": 184
},
{
"epoch": 4.3023255813953485,
"grad_norm": 0.11722293815194844,
"learning_rate": 4.6753057885042186e-06,
"loss": 0.3827,
"step": 185
},
{
"epoch": 4.325581395348837,
"grad_norm": 0.1250948440370564,
"learning_rate": 4.374530661119512e-06,
"loss": 0.3855,
"step": 186
},
{
"epoch": 4.348837209302325,
"grad_norm": 0.1106181593146437,
"learning_rate": 4.0831947542622655e-06,
"loss": 0.3855,
"step": 187
},
{
"epoch": 4.372093023255814,
"grad_norm": 0.10428284018843,
"learning_rate": 3.8013752594452834e-06,
"loss": 0.3862,
"step": 188
},
{
"epoch": 4.395348837209302,
"grad_norm": 0.1049647850048849,
"learning_rate": 3.529146846740559e-06,
"loss": 0.3853,
"step": 189
},
{
"epoch": 4.4186046511627906,
"grad_norm": 0.10140234914167741,
"learning_rate": 3.266581644994915e-06,
"loss": 0.3832,
"step": 190
},
{
"epoch": 4.441860465116279,
"grad_norm": 0.1084178720824487,
"learning_rate": 3.0137492227189803e-06,
"loss": 0.3874,
"step": 191
},
{
"epoch": 4.465116279069767,
"grad_norm": 0.1035454061342504,
"learning_rate": 2.7707165696545035e-06,
"loss": 0.3846,
"step": 192
},
{
"epoch": 4.488372093023256,
"grad_norm": 0.10465143616293164,
"learning_rate": 2.537548079024923e-06,
"loss": 0.3875,
"step": 193
},
{
"epoch": 4.511627906976744,
"grad_norm": 0.10287343108965315,
"learning_rate": 2.3143055304739725e-06,
"loss": 0.3878,
"step": 194
},
{
"epoch": 4.534883720930233,
"grad_norm": 0.09899078580945446,
"learning_rate": 2.1010480736966788e-06,
"loss": 0.3844,
"step": 195
},
{
"epoch": 4.558139534883721,
"grad_norm": 0.0947795487504229,
"learning_rate": 1.8978322127672522e-06,
"loss": 0.3864,
"step": 196
},
{
"epoch": 4.5813953488372094,
"grad_norm": 0.09351592903837026,
"learning_rate": 1.7047117911679567e-06,
"loss": 0.3764,
"step": 197
},
{
"epoch": 4.604651162790698,
"grad_norm": 0.09075701229280372,
"learning_rate": 1.5217379775228503e-06,
"loss": 0.3804,
"step": 198
},
{
"epoch": 4.627906976744186,
"grad_norm": 0.09563361674368318,
"learning_rate": 1.3489592520403405e-06,
"loss": 0.3852,
"step": 199
},
{
"epoch": 4.651162790697675,
"grad_norm": 0.09593704632301235,
"learning_rate": 1.186421393667967e-06,
"loss": 0.3802,
"step": 200
},
{
"epoch": 4.674418604651163,
"grad_norm": 0.09257390445253719,
"learning_rate": 1.034167467962983e-06,
"loss": 0.3867,
"step": 201
},
{
"epoch": 4.6976744186046515,
"grad_norm": 0.09289347063379441,
"learning_rate": 8.922378156817957e-07,
"loss": 0.3829,
"step": 202
},
{
"epoch": 4.720930232558139,
"grad_norm": 0.09257169253912409,
"learning_rate": 7.606700420914381e-07,
"loss": 0.3821,
"step": 203
},
{
"epoch": 4.7441860465116275,
"grad_norm": 0.09310627098479367,
"learning_rate": 6.39499007005746e-07,
"loss": 0.3903,
"step": 204
},
{
"epoch": 4.767441860465116,
"grad_norm": 0.08944939258724487,
"learning_rate": 5.287568155490341e-07,
"loss": 0.3885,
"step": 205
},
{
"epoch": 4.790697674418604,
"grad_norm": 0.09592002472628403,
"learning_rate": 4.284728096496027e-07,
"loss": 0.3875,
"step": 206
},
{
"epoch": 4.813953488372093,
"grad_norm": 0.09180161205563094,
"learning_rate": 3.3867356026537457e-07,
"loss": 0.3855,
"step": 207
},
{
"epoch": 4.837209302325581,
"grad_norm": 0.08528621258416194,
"learning_rate": 2.593828603437487e-07,
"loss": 0.3846,
"step": 208
},
{
"epoch": 4.8604651162790695,
"grad_norm": 0.08815943836016799,
"learning_rate": 1.906217185174919e-07,
"loss": 0.3839,
"step": 209
},
{
"epoch": 4.883720930232558,
"grad_norm": 0.08604030139119473,
"learning_rate": 1.3240835353834247e-07,
"loss": 0.3892,
"step": 210
},
{
"epoch": 4.906976744186046,
"grad_norm": 0.0865890927390752,
"learning_rate": 8.475818944982728e-08,
"loss": 0.3835,
"step": 211
},
{
"epoch": 4.930232558139535,
"grad_norm": 0.09058754572938608,
"learning_rate": 4.7683851500548437e-08,
"loss": 0.3788,
"step": 212
},
{
"epoch": 4.953488372093023,
"grad_norm": 0.08787858276738178,
"learning_rate": 2.1195162799032554e-08,
"loss": 0.3903,
"step": 213
},
{
"epoch": 4.976744186046512,
"grad_norm": 0.08813542305445697,
"learning_rate": 5.299141711043732e-09,
"loss": 0.3865,
"step": 214
},
{
"epoch": 5.0,
"grad_norm": 0.09709093222975328,
"learning_rate": 0.0,
"loss": 0.379,
"step": 215
},
{
"epoch": 5.0,
"step": 215,
"total_flos": 3606870585507840.0,
"train_loss": 0.5023871626964835,
"train_runtime": 3261.3525,
"train_samples_per_second": 33.201,
"train_steps_per_second": 0.066
}
],
"logging_steps": 1,
"max_steps": 215,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3606870585507840.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}