{}
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9936034115138592,
"eval_steps": 500,
"global_step": 234,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01279317697228145,
"grad_norm": 6.832852840423584,
"learning_rate": 4.1666666666666667e-07,
"loss": 1.0868,
"step": 1
},
{
"epoch": 0.0255863539445629,
"grad_norm": 6.904089450836182,
"learning_rate": 8.333333333333333e-07,
"loss": 1.1044,
"step": 2
},
{
"epoch": 0.03837953091684435,
"grad_norm": 6.991506576538086,
"learning_rate": 1.25e-06,
"loss": 1.1401,
"step": 3
},
{
"epoch": 0.0511727078891258,
"grad_norm": 6.811994552612305,
"learning_rate": 1.6666666666666667e-06,
"loss": 1.1168,
"step": 4
},
{
"epoch": 0.06396588486140725,
"grad_norm": 6.493495464324951,
"learning_rate": 2.0833333333333334e-06,
"loss": 1.0934,
"step": 5
},
{
"epoch": 0.0767590618336887,
"grad_norm": 5.279635429382324,
"learning_rate": 2.5e-06,
"loss": 1.081,
"step": 6
},
{
"epoch": 0.08955223880597014,
"grad_norm": 4.791995525360107,
"learning_rate": 2.916666666666667e-06,
"loss": 1.0356,
"step": 7
},
{
"epoch": 0.1023454157782516,
"grad_norm": 2.9714953899383545,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.9912,
"step": 8
},
{
"epoch": 0.11513859275053305,
"grad_norm": 2.6682045459747314,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.947,
"step": 9
},
{
"epoch": 0.1279317697228145,
"grad_norm": 2.5980541706085205,
"learning_rate": 4.166666666666667e-06,
"loss": 0.9328,
"step": 10
},
{
"epoch": 0.14072494669509594,
"grad_norm": 4.253756999969482,
"learning_rate": 4.583333333333333e-06,
"loss": 0.9871,
"step": 11
},
{
"epoch": 0.1535181236673774,
"grad_norm": 4.215671062469482,
"learning_rate": 5e-06,
"loss": 0.9252,
"step": 12
},
{
"epoch": 0.16631130063965885,
"grad_norm": 4.390238285064697,
"learning_rate": 5.416666666666667e-06,
"loss": 0.9241,
"step": 13
},
{
"epoch": 0.1791044776119403,
"grad_norm": 3.742227077484131,
"learning_rate": 5.833333333333334e-06,
"loss": 0.9443,
"step": 14
},
{
"epoch": 0.19189765458422176,
"grad_norm": 3.979121446609497,
"learning_rate": 6.25e-06,
"loss": 0.946,
"step": 15
},
{
"epoch": 0.2046908315565032,
"grad_norm": 2.8892107009887695,
"learning_rate": 6.666666666666667e-06,
"loss": 0.8909,
"step": 16
},
{
"epoch": 0.21748400852878466,
"grad_norm": 2.102924346923828,
"learning_rate": 7.083333333333335e-06,
"loss": 0.8488,
"step": 17
},
{
"epoch": 0.2302771855010661,
"grad_norm": 2.0470516681671143,
"learning_rate": 7.500000000000001e-06,
"loss": 0.9146,
"step": 18
},
{
"epoch": 0.24307036247334754,
"grad_norm": 1.9300282001495361,
"learning_rate": 7.916666666666667e-06,
"loss": 0.8769,
"step": 19
},
{
"epoch": 0.255863539445629,
"grad_norm": 1.6612228155136108,
"learning_rate": 8.333333333333334e-06,
"loss": 0.8631,
"step": 20
},
{
"epoch": 0.26865671641791045,
"grad_norm": 1.4431664943695068,
"learning_rate": 8.750000000000001e-06,
"loss": 0.8269,
"step": 21
},
{
"epoch": 0.2814498933901919,
"grad_norm": 1.2628264427185059,
"learning_rate": 9.166666666666666e-06,
"loss": 0.8353,
"step": 22
},
{
"epoch": 0.2942430703624733,
"grad_norm": 1.1078248023986816,
"learning_rate": 9.583333333333335e-06,
"loss": 0.8076,
"step": 23
},
{
"epoch": 0.3070362473347548,
"grad_norm": 1.111648440361023,
"learning_rate": 1e-05,
"loss": 0.9005,
"step": 24
},
{
"epoch": 0.31982942430703626,
"grad_norm": 0.8681544065475464,
"learning_rate": 9.999440509051367e-06,
"loss": 0.8255,
"step": 25
},
{
"epoch": 0.3326226012793177,
"grad_norm": 1.1207367181777954,
"learning_rate": 9.997762161417517e-06,
"loss": 0.8363,
"step": 26
},
{
"epoch": 0.34541577825159914,
"grad_norm": 0.9773739576339722,
"learning_rate": 9.994965332706574e-06,
"loss": 0.7949,
"step": 27
},
{
"epoch": 0.3582089552238806,
"grad_norm": 0.7835165858268738,
"learning_rate": 9.991050648838676e-06,
"loss": 0.7652,
"step": 28
},
{
"epoch": 0.37100213219616207,
"grad_norm": 1.0131946802139282,
"learning_rate": 9.986018985905901e-06,
"loss": 0.8445,
"step": 29
},
{
"epoch": 0.3837953091684435,
"grad_norm": 0.8833023905754089,
"learning_rate": 9.979871469976197e-06,
"loss": 0.7808,
"step": 30
},
{
"epoch": 0.39658848614072495,
"grad_norm": 0.8186467289924622,
"learning_rate": 9.972609476841368e-06,
"loss": 0.8133,
"step": 31
},
{
"epoch": 0.4093816631130064,
"grad_norm": 0.8074617385864258,
"learning_rate": 9.964234631709188e-06,
"loss": 0.7957,
"step": 32
},
{
"epoch": 0.42217484008528783,
"grad_norm": 0.8177934288978577,
"learning_rate": 9.954748808839675e-06,
"loss": 0.8108,
"step": 33
},
{
"epoch": 0.4349680170575693,
"grad_norm": 0.5792050957679749,
"learning_rate": 9.944154131125643e-06,
"loss": 0.7906,
"step": 34
},
{
"epoch": 0.44776119402985076,
"grad_norm": 0.7301996946334839,
"learning_rate": 9.932452969617607e-06,
"loss": 0.8191,
"step": 35
},
{
"epoch": 0.4605543710021322,
"grad_norm": 0.6449064612388611,
"learning_rate": 9.91964794299315e-06,
"loss": 0.7929,
"step": 36
},
{
"epoch": 0.47334754797441364,
"grad_norm": 0.6409065127372742,
"learning_rate": 9.905741916970863e-06,
"loss": 0.853,
"step": 37
},
{
"epoch": 0.4861407249466951,
"grad_norm": 0.5154856443405151,
"learning_rate": 9.890738003669029e-06,
"loss": 0.7733,
"step": 38
},
{
"epoch": 0.4989339019189765,
"grad_norm": 0.6274257302284241,
"learning_rate": 9.874639560909118e-06,
"loss": 0.8247,
"step": 39
},
{
"epoch": 0.511727078891258,
"grad_norm": 0.636122465133667,
"learning_rate": 9.857450191464337e-06,
"loss": 0.8102,
"step": 40
},
{
"epoch": 0.5245202558635395,
"grad_norm": 0.5821788311004639,
"learning_rate": 9.839173742253334e-06,
"loss": 0.7774,
"step": 41
},
{
"epoch": 0.5373134328358209,
"grad_norm": 0.5190237760543823,
"learning_rate": 9.819814303479268e-06,
"loss": 0.766,
"step": 42
},
{
"epoch": 0.5501066098081023,
"grad_norm": 0.49539390206336975,
"learning_rate": 9.799376207714446e-06,
"loss": 0.7937,
"step": 43
},
{
"epoch": 0.5628997867803838,
"grad_norm": 0.5138888359069824,
"learning_rate": 9.777864028930705e-06,
"loss": 0.7861,
"step": 44
},
{
"epoch": 0.5756929637526652,
"grad_norm": 0.49708378314971924,
"learning_rate": 9.755282581475769e-06,
"loss": 0.7356,
"step": 45
},
{
"epoch": 0.5884861407249466,
"grad_norm": 0.5589980483055115,
"learning_rate": 9.731636918995821e-06,
"loss": 0.7157,
"step": 46
},
{
"epoch": 0.6012793176972282,
"grad_norm": 0.5140479207038879,
"learning_rate": 9.706932333304518e-06,
"loss": 0.7599,
"step": 47
},
{
"epoch": 0.6140724946695096,
"grad_norm": 0.5195008516311646,
"learning_rate": 9.681174353198687e-06,
"loss": 0.7896,
"step": 48
},
{
"epoch": 0.6268656716417911,
"grad_norm": 0.510276734828949,
"learning_rate": 9.654368743221022e-06,
"loss": 0.7775,
"step": 49
},
{
"epoch": 0.6396588486140725,
"grad_norm": 0.6014343500137329,
"learning_rate": 9.626521502369984e-06,
"loss": 0.7615,
"step": 50
},
{
"epoch": 0.652452025586354,
"grad_norm": 0.5976372361183167,
"learning_rate": 9.597638862757255e-06,
"loss": 0.8036,
"step": 51
},
{
"epoch": 0.6652452025586354,
"grad_norm": 0.6337112188339233,
"learning_rate": 9.567727288213005e-06,
"loss": 0.8269,
"step": 52
},
{
"epoch": 0.6780383795309168,
"grad_norm": 0.4970976710319519,
"learning_rate": 9.536793472839325e-06,
"loss": 0.7883,
"step": 53
},
{
"epoch": 0.6908315565031983,
"grad_norm": 0.5633640289306641,
"learning_rate": 9.504844339512096e-06,
"loss": 0.7726,
"step": 54
},
{
"epoch": 0.7036247334754797,
"grad_norm": 0.6632335186004639,
"learning_rate": 9.471887038331686e-06,
"loss": 0.7631,
"step": 55
},
{
"epoch": 0.7164179104477612,
"grad_norm": 0.6567376852035522,
"learning_rate": 9.437928945022772e-06,
"loss": 0.7916,
"step": 56
},
{
"epoch": 0.7292110874200426,
"grad_norm": 0.4805288016796112,
"learning_rate": 9.40297765928369e-06,
"loss": 0.7652,
"step": 57
},
{
"epoch": 0.7420042643923241,
"grad_norm": 0.5745772123336792,
"learning_rate": 9.36704100308565e-06,
"loss": 0.7946,
"step": 58
},
{
"epoch": 0.7547974413646056,
"grad_norm": 0.5194308161735535,
"learning_rate": 9.330127018922195e-06,
"loss": 0.7118,
"step": 59
},
{
"epoch": 0.767590618336887,
"grad_norm": 0.4913547933101654,
"learning_rate": 9.292243968009332e-06,
"loss": 0.7412,
"step": 60
},
{
"epoch": 0.7803837953091685,
"grad_norm": 0.5241764187812805,
"learning_rate": 9.253400328436699e-06,
"loss": 0.7634,
"step": 61
},
{
"epoch": 0.7931769722814499,
"grad_norm": 0.5345350503921509,
"learning_rate": 9.213604793270196e-06,
"loss": 0.7353,
"step": 62
},
{
"epoch": 0.8059701492537313,
"grad_norm": 0.5227858424186707,
"learning_rate": 9.172866268606514e-06,
"loss": 0.7697,
"step": 63
},
{
"epoch": 0.8187633262260128,
"grad_norm": 0.4864361584186554,
"learning_rate": 9.131193871579975e-06,
"loss": 0.8112,
"step": 64
},
{
"epoch": 0.8315565031982942,
"grad_norm": 0.5334176421165466,
"learning_rate": 9.088596928322158e-06,
"loss": 0.731,
"step": 65
},
{
"epoch": 0.8443496801705757,
"grad_norm": 0.4496009945869446,
"learning_rate": 9.045084971874738e-06,
"loss": 0.7206,
"step": 66
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.4755212068557739,
"learning_rate": 9.000667740056033e-06,
"loss": 0.7843,
"step": 67
},
{
"epoch": 0.8699360341151386,
"grad_norm": 0.45297691226005554,
"learning_rate": 8.955355173281709e-06,
"loss": 0.76,
"step": 68
},
{
"epoch": 0.8827292110874201,
"grad_norm": 0.4970425069332123,
"learning_rate": 8.90915741234015e-06,
"loss": 0.7674,
"step": 69
},
{
"epoch": 0.8955223880597015,
"grad_norm": 0.5083153247833252,
"learning_rate": 8.862084796122998e-06,
"loss": 0.7656,
"step": 70
},
{
"epoch": 0.908315565031983,
"grad_norm": 0.45638445019721985,
"learning_rate": 8.814147859311333e-06,
"loss": 0.7303,
"step": 71
},
{
"epoch": 0.9211087420042644,
"grad_norm": 0.4436575472354889,
"learning_rate": 8.765357330018056e-06,
"loss": 0.7295,
"step": 72
},
{
"epoch": 0.9339019189765458,
"grad_norm": 0.5214686393737793,
"learning_rate": 8.715724127386971e-06,
"loss": 0.7075,
"step": 73
},
{
"epoch": 0.9466950959488273,
"grad_norm": 0.48304834961891174,
"learning_rate": 8.665259359149132e-06,
"loss": 0.7279,
"step": 74
},
{
"epoch": 0.9594882729211087,
"grad_norm": 0.5089297294616699,
"learning_rate": 8.613974319136959e-06,
"loss": 0.7424,
"step": 75
},
{
"epoch": 0.9722814498933902,
"grad_norm": 0.49402785301208496,
"learning_rate": 8.561880484756726e-06,
"loss": 0.7957,
"step": 76
},
{
"epoch": 0.9850746268656716,
"grad_norm": 0.5247993469238281,
"learning_rate": 8.508989514419959e-06,
"loss": 0.7539,
"step": 77
},
{
"epoch": 0.997867803837953,
"grad_norm": 0.4710896611213684,
"learning_rate": 8.455313244934324e-06,
"loss": 0.7057,
"step": 78
},
{
"epoch": 1.0106609808102345,
"grad_norm": 1.7053124904632568,
"learning_rate": 8.400863688854598e-06,
"loss": 1.2963,
"step": 79
},
{
"epoch": 1.023454157782516,
"grad_norm": 0.5480591654777527,
"learning_rate": 8.345653031794292e-06,
"loss": 0.7255,
"step": 80
},
{
"epoch": 1.0362473347547974,
"grad_norm": 0.5014137029647827,
"learning_rate": 8.289693629698564e-06,
"loss": 0.7831,
"step": 81
},
{
"epoch": 1.049040511727079,
"grad_norm": 0.4693015515804291,
"learning_rate": 8.232998006078998e-06,
"loss": 0.7265,
"step": 82
},
{
"epoch": 1.0618336886993602,
"grad_norm": 0.4948160648345947,
"learning_rate": 8.175578849210894e-06,
"loss": 0.7189,
"step": 83
},
{
"epoch": 1.0746268656716418,
"grad_norm": 0.6815115213394165,
"learning_rate": 8.117449009293668e-06,
"loss": 0.7964,
"step": 84
},
{
"epoch": 1.0874200426439233,
"grad_norm": 0.44852644205093384,
"learning_rate": 8.058621495575032e-06,
"loss": 0.6198,
"step": 85
},
{
"epoch": 1.1002132196162047,
"grad_norm": 0.535753071308136,
"learning_rate": 7.99910947343957e-06,
"loss": 0.6642,
"step": 86
},
{
"epoch": 1.1130063965884862,
"grad_norm": 0.46651574969291687,
"learning_rate": 7.938926261462366e-06,
"loss": 0.731,
"step": 87
},
{
"epoch": 1.1257995735607675,
"grad_norm": 0.5469245910644531,
"learning_rate": 7.87808532842837e-06,
"loss": 0.794,
"step": 88
},
{
"epoch": 1.138592750533049,
"grad_norm": 0.5276331901550293,
"learning_rate": 7.81660029031811e-06,
"loss": 0.6953,
"step": 89
},
{
"epoch": 1.1513859275053304,
"grad_norm": 0.42721840739250183,
"learning_rate": 7.754484907260513e-06,
"loss": 0.7541,
"step": 90
},
{
"epoch": 1.164179104477612,
"grad_norm": 0.4775303602218628,
"learning_rate": 7.691753080453413e-06,
"loss": 0.704,
"step": 91
},
{
"epoch": 1.1769722814498933,
"grad_norm": 0.5940173268318176,
"learning_rate": 7.628418849052523e-06,
"loss": 0.778,
"step": 92
},
{
"epoch": 1.1897654584221748,
"grad_norm": 0.4526679217815399,
"learning_rate": 7.564496387029532e-06,
"loss": 0.7215,
"step": 93
},
{
"epoch": 1.2025586353944564,
"grad_norm": 0.4627590775489807,
"learning_rate": 7.500000000000001e-06,
"loss": 0.7299,
"step": 94
},
{
"epoch": 1.2153518123667377,
"grad_norm": 0.4942200183868408,
"learning_rate": 7.434944122021837e-06,
"loss": 0.7224,
"step": 95
},
{
"epoch": 1.2281449893390193,
"grad_norm": 0.428196519613266,
"learning_rate": 7.369343312364994e-06,
"loss": 0.7617,
"step": 96
},
{
"epoch": 1.2409381663113006,
"grad_norm": 0.4644979238510132,
"learning_rate": 7.303212252253163e-06,
"loss": 0.7441,
"step": 97
},
{
"epoch": 1.2537313432835822,
"grad_norm": 0.5221542119979858,
"learning_rate": 7.236565741578163e-06,
"loss": 0.7311,
"step": 98
},
{
"epoch": 1.2665245202558635,
"grad_norm": 0.43763238191604614,
"learning_rate": 7.169418695587791e-06,
"loss": 0.7698,
"step": 99
},
{
"epoch": 1.279317697228145,
"grad_norm": 0.4271838366985321,
"learning_rate": 7.101786141547829e-06,
"loss": 0.6731,
"step": 100
},
{
"epoch": 1.2921108742004264,
"grad_norm": 0.4979677200317383,
"learning_rate": 7.033683215379002e-06,
"loss": 0.7077,
"step": 101
},
{
"epoch": 1.304904051172708,
"grad_norm": 0.4421441853046417,
"learning_rate": 6.965125158269619e-06,
"loss": 0.7422,
"step": 102
},
{
"epoch": 1.3176972281449895,
"grad_norm": 0.44597184658050537,
"learning_rate": 6.896127313264643e-06,
"loss": 0.7129,
"step": 103
},
{
"epoch": 1.3304904051172708,
"grad_norm": 0.48497170209884644,
"learning_rate": 6.8267051218319766e-06,
"loss": 0.7248,
"step": 104
},
{
"epoch": 1.3432835820895521,
"grad_norm": 0.5230187177658081,
"learning_rate": 6.7568741204067145e-06,
"loss": 0.6923,
"step": 105
},
{
"epoch": 1.3560767590618337,
"grad_norm": 0.47476544976234436,
"learning_rate": 6.686649936914151e-06,
"loss": 0.732,
"step": 106
},
{
"epoch": 1.3688699360341152,
"grad_norm": 0.40445131063461304,
"learning_rate": 6.616048287272301e-06,
"loss": 0.7672,
"step": 107
},
{
"epoch": 1.3816631130063965,
"grad_norm": 0.4726698100566864,
"learning_rate": 6.545084971874738e-06,
"loss": 0.7898,
"step": 108
},
{
"epoch": 1.394456289978678,
"grad_norm": 0.4922250211238861,
"learning_rate": 6.473775872054522e-06,
"loss": 0.7054,
"step": 109
},
{
"epoch": 1.4072494669509594,
"grad_norm": 0.4099796712398529,
"learning_rate": 6.402136946530014e-06,
"loss": 0.6099,
"step": 110
},
{
"epoch": 1.420042643923241,
"grad_norm": 0.5015040040016174,
"learning_rate": 6.330184227833376e-06,
"loss": 0.7295,
"step": 111
},
{
"epoch": 1.4328358208955223,
"grad_norm": 0.43680623173713684,
"learning_rate": 6.257933818722544e-06,
"loss": 0.7267,
"step": 112
},
{
"epoch": 1.4456289978678039,
"grad_norm": 0.4174647927284241,
"learning_rate": 6.185401888577488e-06,
"loss": 0.6789,
"step": 113
},
{
"epoch": 1.4584221748400852,
"grad_norm": 0.45874616503715515,
"learning_rate": 6.112604669781572e-06,
"loss": 0.8178,
"step": 114
},
{
"epoch": 1.4712153518123667,
"grad_norm": 0.39787718653678894,
"learning_rate": 6.039558454088796e-06,
"loss": 0.6645,
"step": 115
},
{
"epoch": 1.4840085287846483,
"grad_norm": 0.6014237403869629,
"learning_rate": 5.9662795889777666e-06,
"loss": 0.7598,
"step": 116
},
{
"epoch": 1.4968017057569296,
"grad_norm": 0.4093945026397705,
"learning_rate": 5.892784473993184e-06,
"loss": 0.6873,
"step": 117
},
{
"epoch": 1.509594882729211,
"grad_norm": 0.48048579692840576,
"learning_rate": 5.819089557075689e-06,
"loss": 0.7689,
"step": 118
},
{
"epoch": 1.5223880597014925,
"grad_norm": 0.45258212089538574,
"learning_rate": 5.745211330880872e-06,
"loss": 0.6531,
"step": 119
},
{
"epoch": 1.535181236673774,
"grad_norm": 0.4489460289478302,
"learning_rate": 5.671166329088278e-06,
"loss": 0.7181,
"step": 120
},
{
"epoch": 1.5479744136460556,
"grad_norm": 0.39668354392051697,
"learning_rate": 5.596971122701221e-06,
"loss": 0.6797,
"step": 121
},
{
"epoch": 1.560767590618337,
"grad_norm": 0.3912845849990845,
"learning_rate": 5.522642316338268e-06,
"loss": 0.736,
"step": 122
},
{
"epoch": 1.5735607675906182,
"grad_norm": 0.45017972588539124,
"learning_rate": 5.448196544517168e-06,
"loss": 0.7289,
"step": 123
},
{
"epoch": 1.5863539445628998,
"grad_norm": 0.4025014042854309,
"learning_rate": 5.373650467932122e-06,
"loss": 0.7216,
"step": 124
},
{
"epoch": 1.5991471215351813,
"grad_norm": 0.4008171558380127,
"learning_rate": 5.299020769725172e-06,
"loss": 0.6882,
"step": 125
},
{
"epoch": 1.6119402985074627,
"grad_norm": 0.3734460175037384,
"learning_rate": 5.224324151752575e-06,
"loss": 0.7057,
"step": 126
},
{
"epoch": 1.624733475479744,
"grad_norm": 0.41330069303512573,
"learning_rate": 5.1495773308469935e-06,
"loss": 0.6429,
"step": 127
},
{
"epoch": 1.6375266524520256,
"grad_norm": 0.409628689289093,
"learning_rate": 5.074797035076319e-06,
"loss": 0.7052,
"step": 128
},
{
"epoch": 1.650319829424307,
"grad_norm": 0.3930547535419464,
"learning_rate": 5e-06,
"loss": 0.773,
"step": 129
},
{
"epoch": 1.6631130063965884,
"grad_norm": 0.47582224011421204,
"learning_rate": 4.9252029649236835e-06,
"loss": 0.7179,
"step": 130
},
{
"epoch": 1.67590618336887,
"grad_norm": 0.4209410548210144,
"learning_rate": 4.850422669153009e-06,
"loss": 0.6379,
"step": 131
},
{
"epoch": 1.6886993603411513,
"grad_norm": 0.4784112870693207,
"learning_rate": 4.775675848247427e-06,
"loss": 0.7232,
"step": 132
},
{
"epoch": 1.7014925373134329,
"grad_norm": 0.44215840101242065,
"learning_rate": 4.700979230274829e-06,
"loss": 0.663,
"step": 133
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.5071489810943604,
"learning_rate": 4.626349532067879e-06,
"loss": 0.7215,
"step": 134
},
{
"epoch": 1.7270788912579957,
"grad_norm": 0.47784915566444397,
"learning_rate": 4.551803455482833e-06,
"loss": 0.788,
"step": 135
},
{
"epoch": 1.739872068230277,
"grad_norm": 0.5307613611221313,
"learning_rate": 4.477357683661734e-06,
"loss": 0.745,
"step": 136
},
{
"epoch": 1.7526652452025586,
"grad_norm": 0.455413281917572,
"learning_rate": 4.4030288772987795e-06,
"loss": 0.7129,
"step": 137
},
{
"epoch": 1.7654584221748402,
"grad_norm": 0.4110308289527893,
"learning_rate": 4.3288336709117246e-06,
"loss": 0.7328,
"step": 138
},
{
"epoch": 1.7782515991471215,
"grad_norm": 0.5132991075515747,
"learning_rate": 4.254788669119127e-06,
"loss": 0.7738,
"step": 139
},
{
"epoch": 1.7910447761194028,
"grad_norm": 0.4838980436325073,
"learning_rate": 4.180910442924312e-06,
"loss": 0.6317,
"step": 140
},
{
"epoch": 1.8038379530916844,
"grad_norm": 0.4499025046825409,
"learning_rate": 4.107215526006818e-06,
"loss": 0.7288,
"step": 141
},
{
"epoch": 1.816631130063966,
"grad_norm": 0.47396278381347656,
"learning_rate": 4.033720411022235e-06,
"loss": 0.776,
"step": 142
},
{
"epoch": 1.8294243070362475,
"grad_norm": 0.3979196548461914,
"learning_rate": 3.960441545911205e-06,
"loss": 0.683,
"step": 143
},
{
"epoch": 1.8422174840085288,
"grad_norm": 0.43219274282455444,
"learning_rate": 3.887395330218429e-06,
"loss": 0.7194,
"step": 144
},
{
"epoch": 1.8550106609808101,
"grad_norm": 0.4215470254421234,
"learning_rate": 3.8145981114225135e-06,
"loss": 0.7744,
"step": 145
},
{
"epoch": 1.8678038379530917,
"grad_norm": 0.4141583740711212,
"learning_rate": 3.7420661812774577e-06,
"loss": 0.7245,
"step": 146
},
{
"epoch": 1.8805970149253732,
"grad_norm": 0.3775157332420349,
"learning_rate": 3.669815772166625e-06,
"loss": 0.6878,
"step": 147
},
{
"epoch": 1.8933901918976546,
"grad_norm": 0.4012461304664612,
"learning_rate": 3.5978630534699873e-06,
"loss": 0.7315,
"step": 148
},
{
"epoch": 1.906183368869936,
"grad_norm": 0.3893410861492157,
"learning_rate": 3.526224127945479e-06,
"loss": 0.6943,
"step": 149
},
{
"epoch": 1.9189765458422174,
"grad_norm": 0.4317570626735687,
"learning_rate": 3.4549150281252635e-06,
"loss": 0.6904,
"step": 150
},
{
"epoch": 1.931769722814499,
"grad_norm": 0.4641903340816498,
"learning_rate": 3.383951712727701e-06,
"loss": 0.7495,
"step": 151
},
{
"epoch": 1.9445628997867805,
"grad_norm": 0.4203890264034271,
"learning_rate": 3.3133500630858507e-06,
"loss": 0.7436,
"step": 152
},
{
"epoch": 1.9573560767590619,
"grad_norm": 0.39781543612480164,
"learning_rate": 3.2431258795932863e-06,
"loss": 0.7042,
"step": 153
},
{
"epoch": 1.9701492537313432,
"grad_norm": 0.4704498052597046,
"learning_rate": 3.173294878168025e-06,
"loss": 0.7379,
"step": 154
},
{
"epoch": 1.9829424307036247,
"grad_norm": 0.39690351486206055,
"learning_rate": 3.1038726867353587e-06,
"loss": 0.6986,
"step": 155
},
{
"epoch": 1.9957356076759063,
"grad_norm": 0.46861645579338074,
"learning_rate": 3.0348748417303826e-06,
"loss": 0.7317,
"step": 156
},
{
"epoch": 2.008528784648188,
"grad_norm": 1.1607420444488525,
"learning_rate": 2.966316784621e-06,
"loss": 1.2703,
"step": 157
},
{
"epoch": 2.021321961620469,
"grad_norm": 0.38464683294296265,
"learning_rate": 2.8982138584521734e-06,
"loss": 0.7064,
"step": 158
},
{
"epoch": 2.0341151385927505,
"grad_norm": 0.5022476315498352,
"learning_rate": 2.83058130441221e-06,
"loss": 0.7061,
"step": 159
},
{
"epoch": 2.046908315565032,
"grad_norm": 0.5013148188591003,
"learning_rate": 2.7634342584218364e-06,
"loss": 0.7014,
"step": 160
},
{
"epoch": 2.0597014925373136,
"grad_norm": 0.47904056310653687,
"learning_rate": 2.6967877477468394e-06,
"loss": 0.6972,
"step": 161
},
{
"epoch": 2.0724946695095947,
"grad_norm": 0.41209492087364197,
"learning_rate": 2.6306566876350072e-06,
"loss": 0.6418,
"step": 162
},
{
"epoch": 2.0852878464818763,
"grad_norm": 0.46157318353652954,
"learning_rate": 2.5650558779781635e-06,
"loss": 0.7516,
"step": 163
},
{
"epoch": 2.098081023454158,
"grad_norm": 0.42628005146980286,
"learning_rate": 2.5000000000000015e-06,
"loss": 0.7129,
"step": 164
},
{
"epoch": 2.1108742004264394,
"grad_norm": 0.45490726828575134,
"learning_rate": 2.43550361297047e-06,
"loss": 0.7031,
"step": 165
},
{
"epoch": 2.1236673773987205,
"grad_norm": 0.4108670651912689,
"learning_rate": 2.371581150947476e-06,
"loss": 0.7059,
"step": 166
},
{
"epoch": 2.136460554371002,
"grad_norm": 0.4316941797733307,
"learning_rate": 2.3082469195465893e-06,
"loss": 0.6921,
"step": 167
},
{
"epoch": 2.1492537313432836,
"grad_norm": 0.4376332759857178,
"learning_rate": 2.245515092739488e-06,
"loss": 0.6459,
"step": 168
},
{
"epoch": 2.162046908315565,
"grad_norm": 0.4335668981075287,
"learning_rate": 2.1833997096818897e-06,
"loss": 0.6815,
"step": 169
},
{
"epoch": 2.1748400852878467,
"grad_norm": 0.37335702776908875,
"learning_rate": 2.1219146715716332e-06,
"loss": 0.6917,
"step": 170
},
{
"epoch": 2.1876332622601278,
"grad_norm": 0.4028381109237671,
"learning_rate": 2.061073738537635e-06,
"loss": 0.6415,
"step": 171
},
{
"epoch": 2.2004264392324093,
"grad_norm": 0.4265342056751251,
"learning_rate": 2.0008905265604316e-06,
"loss": 0.6476,
"step": 172
},
{
"epoch": 2.213219616204691,
"grad_norm": 0.4345915615558624,
"learning_rate": 1.941378504424968e-06,
"loss": 0.8098,
"step": 173
},
{
"epoch": 2.2260127931769724,
"grad_norm": 0.35505157709121704,
"learning_rate": 1.8825509907063328e-06,
"loss": 0.6996,
"step": 174
},
{
"epoch": 2.2388059701492535,
"grad_norm": 0.3520065248012543,
"learning_rate": 1.8244211507891064e-06,
"loss": 0.6946,
"step": 175
},
{
"epoch": 2.251599147121535,
"grad_norm": 0.3407677114009857,
"learning_rate": 1.7670019939210025e-06,
"loss": 0.7051,
"step": 176
},
{
"epoch": 2.2643923240938166,
"grad_norm": 0.39579153060913086,
"learning_rate": 1.7103063703014372e-06,
"loss": 0.6582,
"step": 177
},
{
"epoch": 2.277185501066098,
"grad_norm": 0.3585205078125,
"learning_rate": 1.6543469682057105e-06,
"loss": 0.6591,
"step": 178
},
{
"epoch": 2.2899786780383797,
"grad_norm": 0.3757244646549225,
"learning_rate": 1.5991363111454023e-06,
"loss": 0.6544,
"step": 179
},
{
"epoch": 2.302771855010661,
"grad_norm": 0.34080037474632263,
"learning_rate": 1.544686755065677e-06,
"loss": 0.6695,
"step": 180
},
{
"epoch": 2.3155650319829424,
"grad_norm": 0.36547356843948364,
"learning_rate": 1.4910104855800429e-06,
"loss": 0.6983,
"step": 181
},
{
"epoch": 2.328358208955224,
"grad_norm": 0.3294934630393982,
"learning_rate": 1.438119515243277e-06,
"loss": 0.6253,
"step": 182
},
{
"epoch": 2.3411513859275055,
"grad_norm": 0.3605087399482727,
"learning_rate": 1.3860256808630429e-06,
"loss": 0.6788,
"step": 183
},
{
"epoch": 2.3539445628997866,
"grad_norm": 0.39677152037620544,
"learning_rate": 1.3347406408508695e-06,
"loss": 0.7431,
"step": 184
},
{
"epoch": 2.366737739872068,
"grad_norm": 0.3269940912723541,
"learning_rate": 1.2842758726130283e-06,
"loss": 0.6587,
"step": 185
},
{
"epoch": 2.3795309168443497,
"grad_norm": 0.3385223150253296,
"learning_rate": 1.234642669981946e-06,
"loss": 0.7,
"step": 186
},
{
"epoch": 2.3923240938166312,
"grad_norm": 0.35292384028434753,
"learning_rate": 1.1858521406886674e-06,
"loss": 0.7007,
"step": 187
},
{
"epoch": 2.405117270788913,
"grad_norm": 0.34651538729667664,
"learning_rate": 1.137915203877003e-06,
"loss": 0.7469,
"step": 188
},
{
"epoch": 2.417910447761194,
"grad_norm": 0.32731908559799194,
"learning_rate": 1.0908425876598512e-06,
"loss": 0.6608,
"step": 189
},
{
"epoch": 2.4307036247334755,
"grad_norm": 0.3515162765979767,
"learning_rate": 1.044644826718295e-06,
"loss": 0.7566,
"step": 190
},
{
"epoch": 2.443496801705757,
"grad_norm": 0.31811511516571045,
"learning_rate": 9.993322599439692e-07,
"loss": 0.6596,
"step": 191
},
{
"epoch": 2.4562899786780386,
"grad_norm": 0.3392126262187958,
"learning_rate": 9.549150281252633e-07,
"loss": 0.7325,
"step": 192
},
{
"epoch": 2.4690831556503197,
"grad_norm": 0.33756223320961,
"learning_rate": 9.114030716778433e-07,
"loss": 0.6592,
"step": 193
},
{
"epoch": 2.481876332622601,
"grad_norm": 0.34133580327033997,
"learning_rate": 8.688061284200266e-07,
"loss": 0.7221,
"step": 194
},
{
"epoch": 2.4946695095948828,
"grad_norm": 0.3212495446205139,
"learning_rate": 8.271337313934869e-07,
"loss": 0.6698,
"step": 195
},
{
"epoch": 2.5074626865671643,
"grad_norm": 0.3041667640209198,
"learning_rate": 7.863952067298042e-07,
"loss": 0.691,
"step": 196
},
{
"epoch": 2.520255863539446,
"grad_norm": 0.35703015327453613,
"learning_rate": 7.465996715633028e-07,
"loss": 0.7766,
"step": 197
},
{
"epoch": 2.533049040511727,
"grad_norm": 0.3571200966835022,
"learning_rate": 7.077560319906696e-07,
"loss": 0.7563,
"step": 198
},
{
"epoch": 2.5458422174840085,
"grad_norm": 0.33537036180496216,
"learning_rate": 6.698729810778065e-07,
"loss": 0.7013,
"step": 199
},
{
"epoch": 2.55863539445629,
"grad_norm": 0.3055112063884735,
"learning_rate": 6.329589969143518e-07,
"loss": 0.6536,
"step": 200
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.3426555097103119,
"learning_rate": 5.9702234071631e-07,
"loss": 0.7274,
"step": 201
},
{
"epoch": 2.5842217484008527,
"grad_norm": 0.3432694673538208,
"learning_rate": 5.620710549772295e-07,
"loss": 0.6523,
"step": 202
},
{
"epoch": 2.5970149253731343,
"grad_norm": 0.36352601647377014,
"learning_rate": 5.281129616683167e-07,
"loss": 0.7254,
"step": 203
},
{
"epoch": 2.609808102345416,
"grad_norm": 0.3131335973739624,
"learning_rate": 4.951556604879049e-07,
"loss": 0.6832,
"step": 204
},
{
"epoch": 2.6226012793176974,
"grad_norm": 0.3547823429107666,
"learning_rate": 4.632065271606756e-07,
"loss": 0.7226,
"step": 205
},
{
"epoch": 2.635394456289979,
"grad_norm": 0.2958380877971649,
"learning_rate": 4.322727117869951e-07,
"loss": 0.6507,
"step": 206
},
{
"epoch": 2.64818763326226,
"grad_norm": 0.2793337404727936,
"learning_rate": 4.0236113724274716e-07,
"loss": 0.6371,
"step": 207
},
{
"epoch": 2.6609808102345416,
"grad_norm": 0.33492887020111084,
"learning_rate": 3.734784976300165e-07,
"loss": 0.7792,
"step": 208
},
{
"epoch": 2.673773987206823,
"grad_norm": 0.3088694214820862,
"learning_rate": 3.4563125677897936e-07,
"loss": 0.5887,
"step": 209
},
{
"epoch": 2.6865671641791042,
"grad_norm": 0.3331063389778137,
"learning_rate": 3.18825646801314e-07,
"loss": 0.6793,
"step": 210
},
{
"epoch": 2.699360341151386,
"grad_norm": 0.3037247359752655,
"learning_rate": 2.930676666954846e-07,
"loss": 0.6553,
"step": 211
},
{
"epoch": 2.7121535181236673,
"grad_norm": 0.3542158305644989,
"learning_rate": 2.6836308100417874e-07,
"loss": 0.7101,
"step": 212
},
{
"epoch": 2.724946695095949,
"grad_norm": 0.3504466116428375,
"learning_rate": 2.447174185242324e-07,
"loss": 0.7189,
"step": 213
},
{
"epoch": 2.7377398720682304,
"grad_norm": 0.30136266350746155,
"learning_rate": 2.2213597106929608e-07,
"loss": 0.6076,
"step": 214
},
{
"epoch": 2.750533049040512,
"grad_norm": 0.3250851631164551,
"learning_rate": 2.006237922855553e-07,
"loss": 0.7334,
"step": 215
},
{
"epoch": 2.763326226012793,
"grad_norm": 0.3266373574733734,
"learning_rate": 1.801856965207338e-07,
"loss": 0.7144,
"step": 216
},
{
"epoch": 2.7761194029850746,
"grad_norm": 0.2980578541755676,
"learning_rate": 1.6082625774666793e-07,
"loss": 0.6139,
"step": 217
},
{
"epoch": 2.788912579957356,
"grad_norm": 0.31971806287765503,
"learning_rate": 1.4254980853566248e-07,
"loss": 0.7111,
"step": 218
},
{
"epoch": 2.8017057569296373,
"grad_norm": 0.30652180314064026,
"learning_rate": 1.253604390908819e-07,
"loss": 0.6719,
"step": 219
},
{
"epoch": 2.814498933901919,
"grad_norm": 0.32493188977241516,
"learning_rate": 1.0926199633097156e-07,
"loss": 0.7834,
"step": 220
},
{
"epoch": 2.8272921108742004,
"grad_norm": 0.30289193987846375,
"learning_rate": 9.42580830291373e-08,
"loss": 0.6486,
"step": 221
},
{
"epoch": 2.840085287846482,
"grad_norm": 0.39349403977394104,
"learning_rate": 8.035205700685167e-08,
"loss": 0.6523,
"step": 222
},
{
"epoch": 2.8528784648187635,
"grad_norm": 0.32352349162101746,
"learning_rate": 6.75470303823933e-08,
"loss": 0.708,
"step": 223
},
{
"epoch": 2.8656716417910446,
"grad_norm": 0.31589657068252563,
"learning_rate": 5.584586887435739e-08,
"loss": 0.7025,
"step": 224
},
{
"epoch": 2.878464818763326,
"grad_norm": 0.3233773708343506,
"learning_rate": 4.52511911603265e-08,
"loss": 0.7003,
"step": 225
},
{
"epoch": 2.8912579957356077,
"grad_norm": 0.3037286400794983,
"learning_rate": 3.576536829081323e-08,
"loss": 0.7252,
"step": 226
},
{
"epoch": 2.9040511727078893,
"grad_norm": 0.3222990334033966,
"learning_rate": 2.7390523158633552e-08,
"loss": 0.7079,
"step": 227
},
{
"epoch": 2.9168443496801704,
"grad_norm": 0.30910524725914,
"learning_rate": 2.012853002380466e-08,
"loss": 0.6701,
"step": 228
},
{
"epoch": 2.929637526652452,
"grad_norm": 0.29240769147872925,
"learning_rate": 1.3981014094099354e-08,
"loss": 0.6388,
"step": 229
},
{
"epoch": 2.9424307036247335,
"grad_norm": 0.34161651134490967,
"learning_rate": 8.949351161324227e-09,
"loss": 0.7613,
"step": 230
},
{
"epoch": 2.955223880597015,
"grad_norm": 0.299943745136261,
"learning_rate": 5.034667293427053e-09,
"loss": 0.6471,
"step": 231
},
{
"epoch": 2.9680170575692966,
"grad_norm": 0.3515676259994507,
"learning_rate": 2.237838582483387e-09,
"loss": 0.7412,
"step": 232
},
{
"epoch": 2.9808102345415777,
"grad_norm": 0.3143077790737152,
"learning_rate": 5.594909486328348e-10,
"loss": 0.6572,
"step": 233
},
{
"epoch": 2.9936034115138592,
"grad_norm": 0.3188575506210327,
"learning_rate": 0.0,
"loss": 0.7006,
"step": 234
},
{
"epoch": 2.9936034115138592,
"step": 234,
"total_flos": 327743678562304.0,
"train_loss": 0.7533083028263516,
"train_runtime": 9147.0121,
"train_samples_per_second": 2.46,
"train_steps_per_second": 0.026
}
],
"logging_steps": 1.0,
"max_steps": 234,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 327743678562304.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}