{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009142857142857144,
"grad_norm": 3.788423157943329,
"learning_rate": 0.0,
"loss": 1.0171,
"num_tokens": 414791.0,
"step": 1
},
{
"epoch": 0.018285714285714287,
"grad_norm": 3.7486067929136304,
"learning_rate": 2.9411764705882356e-07,
"loss": 1.0458,
"num_tokens": 816383.0,
"step": 2
},
{
"epoch": 0.027428571428571427,
"grad_norm": 3.6248817482667772,
"learning_rate": 5.882352941176471e-07,
"loss": 1.0156,
"num_tokens": 1248217.0,
"step": 3
},
{
"epoch": 0.036571428571428574,
"grad_norm": 3.8501840793231983,
"learning_rate": 8.823529411764707e-07,
"loss": 0.9988,
"num_tokens": 1657241.0,
"step": 4
},
{
"epoch": 0.045714285714285714,
"grad_norm": 3.5112388358944506,
"learning_rate": 1.1764705882352942e-06,
"loss": 1.0094,
"num_tokens": 2097247.0,
"step": 5
},
{
"epoch": 0.054857142857142854,
"grad_norm": 3.305630960821923,
"learning_rate": 1.4705882352941177e-06,
"loss": 1.0067,
"num_tokens": 2518049.0,
"step": 6
},
{
"epoch": 0.064,
"grad_norm": 2.763998457471438,
"learning_rate": 1.7647058823529414e-06,
"loss": 0.9815,
"num_tokens": 2911785.0,
"step": 7
},
{
"epoch": 0.07314285714285715,
"grad_norm": 2.553777222235427,
"learning_rate": 2.058823529411765e-06,
"loss": 0.9884,
"num_tokens": 3304861.0,
"step": 8
},
{
"epoch": 0.08228571428571428,
"grad_norm": 1.7314602879429424,
"learning_rate": 2.3529411764705885e-06,
"loss": 0.9657,
"num_tokens": 3744952.0,
"step": 9
},
{
"epoch": 0.09142857142857143,
"grad_norm": 1.639488114011472,
"learning_rate": 2.647058823529412e-06,
"loss": 0.9173,
"num_tokens": 4171394.0,
"step": 10
},
{
"epoch": 0.10057142857142858,
"grad_norm": 1.605492636449322,
"learning_rate": 2.9411764705882355e-06,
"loss": 0.9402,
"num_tokens": 4593000.0,
"step": 11
},
{
"epoch": 0.10971428571428571,
"grad_norm": 1.7144819324641203,
"learning_rate": 3.2352941176470594e-06,
"loss": 0.931,
"num_tokens": 5051170.0,
"step": 12
},
{
"epoch": 0.11885714285714286,
"grad_norm": 2.1335893786941633,
"learning_rate": 3.529411764705883e-06,
"loss": 0.873,
"num_tokens": 5462412.0,
"step": 13
},
{
"epoch": 0.128,
"grad_norm": 1.8322905891256291,
"learning_rate": 3.8235294117647055e-06,
"loss": 0.8839,
"num_tokens": 5897197.0,
"step": 14
},
{
"epoch": 0.13714285714285715,
"grad_norm": 1.3855988250523144,
"learning_rate": 4.11764705882353e-06,
"loss": 0.8531,
"num_tokens": 6310546.0,
"step": 15
},
{
"epoch": 0.1462857142857143,
"grad_norm": 1.2583291182608385,
"learning_rate": 4.411764705882353e-06,
"loss": 0.8449,
"num_tokens": 6737190.0,
"step": 16
},
{
"epoch": 0.15542857142857142,
"grad_norm": 1.7093622730710312,
"learning_rate": 4.705882352941177e-06,
"loss": 0.8459,
"num_tokens": 7152523.0,
"step": 17
},
{
"epoch": 0.16457142857142856,
"grad_norm": 1.3548099953678236,
"learning_rate": 5e-06,
"loss": 0.8382,
"num_tokens": 7614210.0,
"step": 18
},
{
"epoch": 0.1737142857142857,
"grad_norm": 1.2411537968080228,
"learning_rate": 4.999874073411688e-06,
"loss": 0.8003,
"num_tokens": 8005070.0,
"step": 19
},
{
"epoch": 0.18285714285714286,
"grad_norm": 1.1063653911297622,
"learning_rate": 4.999496306332755e-06,
"loss": 0.8161,
"num_tokens": 8458443.0,
"step": 20
},
{
"epoch": 0.192,
"grad_norm": 0.8870246401694026,
"learning_rate": 4.998866736819938e-06,
"loss": 0.8175,
"num_tokens": 8886223.0,
"step": 21
},
{
"epoch": 0.20114285714285715,
"grad_norm": 0.8378533988358792,
"learning_rate": 4.997985428296869e-06,
"loss": 0.79,
"num_tokens": 9308544.0,
"step": 22
},
{
"epoch": 0.2102857142857143,
"grad_norm": 0.8680117397968262,
"learning_rate": 4.996852469547688e-06,
"loss": 0.7697,
"num_tokens": 9738279.0,
"step": 23
},
{
"epoch": 0.21942857142857142,
"grad_norm": 0.7814254549991426,
"learning_rate": 4.9954679747081e-06,
"loss": 0.7791,
"num_tokens": 10176776.0,
"step": 24
},
{
"epoch": 0.22857142857142856,
"grad_norm": 0.6718931761429743,
"learning_rate": 4.993832083253874e-06,
"loss": 0.7549,
"num_tokens": 10607900.0,
"step": 25
},
{
"epoch": 0.2377142857142857,
"grad_norm": 0.7203929761049984,
"learning_rate": 4.991944959986793e-06,
"loss": 0.7647,
"num_tokens": 11029388.0,
"step": 26
},
{
"epoch": 0.24685714285714286,
"grad_norm": 0.7147312112109525,
"learning_rate": 4.989806795018054e-06,
"loss": 0.7638,
"num_tokens": 11457489.0,
"step": 27
},
{
"epoch": 0.256,
"grad_norm": 0.6521467633335539,
"learning_rate": 4.987417803749112e-06,
"loss": 0.7835,
"num_tokens": 11864446.0,
"step": 28
},
{
"epoch": 0.2651428571428571,
"grad_norm": 0.6625801512953233,
"learning_rate": 4.984778226849983e-06,
"loss": 0.7627,
"num_tokens": 12248848.0,
"step": 29
},
{
"epoch": 0.2742857142857143,
"grad_norm": 0.630181987147504,
"learning_rate": 4.981888330234998e-06,
"loss": 0.75,
"num_tokens": 12663440.0,
"step": 30
},
{
"epoch": 0.2834285714285714,
"grad_norm": 0.6381224000786306,
"learning_rate": 4.978748405036014e-06,
"loss": 0.7493,
"num_tokens": 13034106.0,
"step": 31
},
{
"epoch": 0.2925714285714286,
"grad_norm": 0.6161172594215469,
"learning_rate": 4.975358767573085e-06,
"loss": 0.7586,
"num_tokens": 13448760.0,
"step": 32
},
{
"epoch": 0.3017142857142857,
"grad_norm": 0.5841875666239731,
"learning_rate": 4.971719759322596e-06,
"loss": 0.7526,
"num_tokens": 13907189.0,
"step": 33
},
{
"epoch": 0.31085714285714283,
"grad_norm": 0.6291433005199626,
"learning_rate": 4.967831746882863e-06,
"loss": 0.7424,
"num_tokens": 14306209.0,
"step": 34
},
{
"epoch": 0.32,
"grad_norm": 0.5515606238598844,
"learning_rate": 4.9636951219372e-06,
"loss": 0.7547,
"num_tokens": 14720460.0,
"step": 35
},
{
"epoch": 0.3291428571428571,
"grad_norm": 0.5614465276031775,
"learning_rate": 4.959310301214458e-06,
"loss": 0.7288,
"num_tokens": 15132163.0,
"step": 36
},
{
"epoch": 0.3382857142857143,
"grad_norm": 0.6054958180004087,
"learning_rate": 4.954677726447049e-06,
"loss": 0.7372,
"num_tokens": 15497706.0,
"step": 37
},
{
"epoch": 0.3474285714285714,
"grad_norm": 0.5245577430458518,
"learning_rate": 4.949797864326442e-06,
"loss": 0.7084,
"num_tokens": 15931655.0,
"step": 38
},
{
"epoch": 0.3565714285714286,
"grad_norm": 0.5254082660408352,
"learning_rate": 4.944671206456148e-06,
"loss": 0.7091,
"num_tokens": 16341137.0,
"step": 39
},
{
"epoch": 0.3657142857142857,
"grad_norm": 0.5538924183973399,
"learning_rate": 4.939298269302194e-06,
"loss": 0.7452,
"num_tokens": 16776527.0,
"step": 40
},
{
"epoch": 0.37485714285714283,
"grad_norm": 0.5542948467918837,
"learning_rate": 4.933679594141096e-06,
"loss": 0.7374,
"num_tokens": 17183370.0,
"step": 41
},
{
"epoch": 0.384,
"grad_norm": 0.5380100952791218,
"learning_rate": 4.9278157470053305e-06,
"loss": 0.7244,
"num_tokens": 17602865.0,
"step": 42
},
{
"epoch": 0.3931428571428571,
"grad_norm": 0.5519363633677055,
"learning_rate": 4.9217073186263075e-06,
"loss": 0.7253,
"num_tokens": 18002059.0,
"step": 43
},
{
"epoch": 0.4022857142857143,
"grad_norm": 0.5659780047314049,
"learning_rate": 4.915354924374864e-06,
"loss": 0.7209,
"num_tokens": 18417553.0,
"step": 44
},
{
"epoch": 0.4114285714285714,
"grad_norm": 0.5488792186326696,
"learning_rate": 4.908759204199268e-06,
"loss": 0.7396,
"num_tokens": 18840096.0,
"step": 45
},
{
"epoch": 0.4205714285714286,
"grad_norm": 0.5252663101552106,
"learning_rate": 4.901920822560753e-06,
"loss": 0.7127,
"num_tokens": 19258374.0,
"step": 46
},
{
"epoch": 0.4297142857142857,
"grad_norm": 0.5591357036458139,
"learning_rate": 4.89484046836657e-06,
"loss": 0.7206,
"num_tokens": 19680030.0,
"step": 47
},
{
"epoch": 0.43885714285714283,
"grad_norm": 0.5556426602282065,
"learning_rate": 4.887518854900603e-06,
"loss": 0.697,
"num_tokens": 20098565.0,
"step": 48
},
{
"epoch": 0.448,
"grad_norm": 0.5510078792794185,
"learning_rate": 4.879956719751491e-06,
"loss": 0.745,
"num_tokens": 20542243.0,
"step": 49
},
{
"epoch": 0.45714285714285713,
"grad_norm": 0.5396047474298085,
"learning_rate": 4.87215482473834e-06,
"loss": 0.713,
"num_tokens": 20958663.0,
"step": 50
},
{
"epoch": 0.4662857142857143,
"grad_norm": 0.5199497319908324,
"learning_rate": 4.864113955833967e-06,
"loss": 0.7014,
"num_tokens": 21384478.0,
"step": 51
},
{
"epoch": 0.4754285714285714,
"grad_norm": 0.5529473043892272,
"learning_rate": 4.855834923085721e-06,
"loss": 0.7063,
"num_tokens": 21791929.0,
"step": 52
},
{
"epoch": 0.4845714285714286,
"grad_norm": 0.5585081497740281,
"learning_rate": 4.847318560533882e-06,
"loss": 0.7248,
"num_tokens": 22203013.0,
"step": 53
},
{
"epoch": 0.4937142857142857,
"grad_norm": 0.5315034055299764,
"learning_rate": 4.838565726127636e-06,
"loss": 0.6891,
"num_tokens": 22601890.0,
"step": 54
},
{
"epoch": 0.5028571428571429,
"grad_norm": 0.527611783496754,
"learning_rate": 4.829577301638642e-06,
"loss": 0.7488,
"num_tokens": 23070128.0,
"step": 55
},
{
"epoch": 0.512,
"grad_norm": 0.5596739037421498,
"learning_rate": 4.8203541925722016e-06,
"loss": 0.7119,
"num_tokens": 23490480.0,
"step": 56
},
{
"epoch": 0.5211428571428571,
"grad_norm": 0.569290949582533,
"learning_rate": 4.810897328076045e-06,
"loss": 0.6961,
"num_tokens": 23887492.0,
"step": 57
},
{
"epoch": 0.5302857142857142,
"grad_norm": 0.5267117373322459,
"learning_rate": 4.801207660846717e-06,
"loss": 0.6924,
"num_tokens": 24320449.0,
"step": 58
},
{
"epoch": 0.5394285714285715,
"grad_norm": 0.5083151481320303,
"learning_rate": 4.7912861670336065e-06,
"loss": 0.6953,
"num_tokens": 24769809.0,
"step": 59
},
{
"epoch": 0.5485714285714286,
"grad_norm": 0.5537578982564932,
"learning_rate": 4.781133846140606e-06,
"loss": 0.709,
"num_tokens": 25191175.0,
"step": 60
},
{
"epoch": 0.5577142857142857,
"grad_norm": 0.4943590155288578,
"learning_rate": 4.770751720925422e-06,
"loss": 0.7077,
"num_tokens": 25624365.0,
"step": 61
},
{
"epoch": 0.5668571428571428,
"grad_norm": 0.5160855203562402,
"learning_rate": 4.760140837296542e-06,
"loss": 0.7081,
"num_tokens": 26058172.0,
"step": 62
},
{
"epoch": 0.576,
"grad_norm": 0.5284271224337476,
"learning_rate": 4.7493022642078654e-06,
"loss": 0.7044,
"num_tokens": 26455012.0,
"step": 63
},
{
"epoch": 0.5851428571428572,
"grad_norm": 0.5202512411868518,
"learning_rate": 4.7382370935510165e-06,
"loss": 0.7196,
"num_tokens": 26892158.0,
"step": 64
},
{
"epoch": 0.5942857142857143,
"grad_norm": 0.5157697929801275,
"learning_rate": 4.726946440045348e-06,
"loss": 0.7046,
"num_tokens": 27320636.0,
"step": 65
},
{
"epoch": 0.6034285714285714,
"grad_norm": 0.5189217024488914,
"learning_rate": 4.715431441125639e-06,
"loss": 0.6627,
"num_tokens": 27766323.0,
"step": 66
},
{
"epoch": 0.6125714285714285,
"grad_norm": 0.5635204498629328,
"learning_rate": 4.703693256827515e-06,
"loss": 0.6931,
"num_tokens": 28189830.0,
"step": 67
},
{
"epoch": 0.6217142857142857,
"grad_norm": 0.5082211284375098,
"learning_rate": 4.691733069670575e-06,
"loss": 0.6451,
"num_tokens": 28627346.0,
"step": 68
},
{
"epoch": 0.6308571428571429,
"grad_norm": 0.5232606496406234,
"learning_rate": 4.679552084539271e-06,
"loss": 0.671,
"num_tokens": 29057945.0,
"step": 69
},
{
"epoch": 0.64,
"grad_norm": 0.53201837236867,
"learning_rate": 4.667151528561522e-06,
"loss": 0.7202,
"num_tokens": 29486194.0,
"step": 70
},
{
"epoch": 0.6491428571428571,
"grad_norm": 0.561020955033982,
"learning_rate": 4.6545326509850965e-06,
"loss": 0.7006,
"num_tokens": 29845300.0,
"step": 71
},
{
"epoch": 0.6582857142857143,
"grad_norm": 0.5297748591428174,
"learning_rate": 4.641696723051753e-06,
"loss": 0.7088,
"num_tokens": 30298906.0,
"step": 72
},
{
"epoch": 0.6674285714285715,
"grad_norm": 0.5129774250057094,
"learning_rate": 4.628645037869183e-06,
"loss": 0.6655,
"num_tokens": 30729078.0,
"step": 73
},
{
"epoch": 0.6765714285714286,
"grad_norm": 0.5488508086875596,
"learning_rate": 4.615378910280735e-06,
"loss": 0.6962,
"num_tokens": 31142347.0,
"step": 74
},
{
"epoch": 0.6857142857142857,
"grad_norm": 0.5214375076220712,
"learning_rate": 4.60189967673296e-06,
"loss": 0.6992,
"num_tokens": 31530611.0,
"step": 75
},
{
"epoch": 0.6948571428571428,
"grad_norm": 0.5365497011388887,
"learning_rate": 4.588208695140972e-06,
"loss": 0.6856,
"num_tokens": 31934290.0,
"step": 76
},
{
"epoch": 0.704,
"grad_norm": 0.5239414066377821,
"learning_rate": 4.574307344751654e-06,
"loss": 0.6689,
"num_tokens": 32336516.0,
"step": 77
},
{
"epoch": 0.7131428571428572,
"grad_norm": 0.4905474692474024,
"learning_rate": 4.560197026004706e-06,
"loss": 0.6616,
"num_tokens": 32754867.0,
"step": 78
},
{
"epoch": 0.7222857142857143,
"grad_norm": 0.527006404732485,
"learning_rate": 4.5458791603915695e-06,
"loss": 0.672,
"num_tokens": 33177777.0,
"step": 79
},
{
"epoch": 0.7314285714285714,
"grad_norm": 0.49102371409141427,
"learning_rate": 4.5313551903122195e-06,
"loss": 0.6428,
"num_tokens": 33595281.0,
"step": 80
},
{
"epoch": 0.7405714285714285,
"grad_norm": 0.5266302281242587,
"learning_rate": 4.516626578929857e-06,
"loss": 0.685,
"num_tokens": 34010874.0,
"step": 81
},
{
"epoch": 0.7497142857142857,
"grad_norm": 0.5219018971926723,
"learning_rate": 4.501694810023506e-06,
"loss": 0.7053,
"num_tokens": 34452852.0,
"step": 82
},
{
"epoch": 0.7588571428571429,
"grad_norm": 0.5339612937638695,
"learning_rate": 4.486561387838539e-06,
"loss": 0.6656,
"num_tokens": 34850322.0,
"step": 83
},
{
"epoch": 0.768,
"grad_norm": 0.557261181662059,
"learning_rate": 4.471227836935139e-06,
"loss": 0.698,
"num_tokens": 35255541.0,
"step": 84
},
{
"epoch": 0.7771428571428571,
"grad_norm": 0.5282377710472309,
"learning_rate": 4.455695702034705e-06,
"loss": 0.6832,
"num_tokens": 35646651.0,
"step": 85
},
{
"epoch": 0.7862857142857143,
"grad_norm": 0.5054707178540805,
"learning_rate": 4.439966547864243e-06,
"loss": 0.6559,
"num_tokens": 36068819.0,
"step": 86
},
{
"epoch": 0.7954285714285714,
"grad_norm": 0.5150927571466014,
"learning_rate": 4.424041958998732e-06,
"loss": 0.6795,
"num_tokens": 36494246.0,
"step": 87
},
{
"epoch": 0.8045714285714286,
"grad_norm": 0.5151785920486077,
"learning_rate": 4.407923539701486e-06,
"loss": 0.7082,
"num_tokens": 36935755.0,
"step": 88
},
{
"epoch": 0.8137142857142857,
"grad_norm": 0.5219798416078772,
"learning_rate": 4.391612913762549e-06,
"loss": 0.6592,
"num_tokens": 37366636.0,
"step": 89
},
{
"epoch": 0.8228571428571428,
"grad_norm": 0.4814532946235625,
"learning_rate": 4.375111724335102e-06,
"loss": 0.6282,
"num_tokens": 37765456.0,
"step": 90
},
{
"epoch": 0.832,
"grad_norm": 0.5416412082011206,
"learning_rate": 4.358421633769934e-06,
"loss": 0.6744,
"num_tokens": 38150281.0,
"step": 91
},
{
"epoch": 0.8411428571428572,
"grad_norm": 0.48068802364165214,
"learning_rate": 4.341544323447978e-06,
"loss": 0.6873,
"num_tokens": 38679689.0,
"step": 92
},
{
"epoch": 0.8502857142857143,
"grad_norm": 0.4918360073106731,
"learning_rate": 4.324481493610919e-06,
"loss": 0.6935,
"num_tokens": 39124128.0,
"step": 93
},
{
"epoch": 0.8594285714285714,
"grad_norm": 0.5218436245838689,
"learning_rate": 4.307234863189917e-06,
"loss": 0.6905,
"num_tokens": 39548028.0,
"step": 94
},
{
"epoch": 0.8685714285714285,
"grad_norm": 0.5335931444817835,
"learning_rate": 4.289806169632434e-06,
"loss": 0.7121,
"num_tokens": 39948722.0,
"step": 95
},
{
"epoch": 0.8777142857142857,
"grad_norm": 0.5136306733831099,
"learning_rate": 4.272197168727204e-06,
"loss": 0.6711,
"num_tokens": 40371033.0,
"step": 96
},
{
"epoch": 0.8868571428571429,
"grad_norm": 0.5071570494757102,
"learning_rate": 4.254409634427356e-06,
"loss": 0.6654,
"num_tokens": 40782038.0,
"step": 97
},
{
"epoch": 0.896,
"grad_norm": 0.49868261307658696,
"learning_rate": 4.236445358671696e-06,
"loss": 0.668,
"num_tokens": 41221994.0,
"step": 98
},
{
"epoch": 0.9051428571428571,
"grad_norm": 0.4999015555327391,
"learning_rate": 4.218306151204188e-06,
"loss": 0.6829,
"num_tokens": 41655227.0,
"step": 99
},
{
"epoch": 0.9142857142857143,
"grad_norm": 0.5267277450463913,
"learning_rate": 4.1999938393916424e-06,
"loss": 0.6966,
"num_tokens": 42086636.0,
"step": 100
},
{
"epoch": 0.9234285714285714,
"grad_norm": 0.5098818224161321,
"learning_rate": 4.18151026803962e-06,
"loss": 0.7018,
"num_tokens": 42524682.0,
"step": 101
},
{
"epoch": 0.9325714285714286,
"grad_norm": 0.5472107304446481,
"learning_rate": 4.162857299206584e-06,
"loss": 0.695,
"num_tokens": 42919957.0,
"step": 102
},
{
"epoch": 0.9417142857142857,
"grad_norm": 0.5193667055229434,
"learning_rate": 4.144036812016317e-06,
"loss": 0.6772,
"num_tokens": 43334031.0,
"step": 103
},
{
"epoch": 0.9508571428571428,
"grad_norm": 0.5769899431492905,
"learning_rate": 4.1250507024686115e-06,
"loss": 0.6943,
"num_tokens": 43779138.0,
"step": 104
},
{
"epoch": 0.96,
"grad_norm": 0.49582865560110145,
"learning_rate": 4.105900883248269e-06,
"loss": 0.6589,
"num_tokens": 44181409.0,
"step": 105
},
{
"epoch": 0.9691428571428572,
"grad_norm": 0.5560209310215001,
"learning_rate": 4.08658928353241e-06,
"loss": 0.6683,
"num_tokens": 44565057.0,
"step": 106
},
{
"epoch": 0.9782857142857143,
"grad_norm": 0.5105250336516403,
"learning_rate": 4.06711784879613e-06,
"loss": 0.6656,
"num_tokens": 44990460.0,
"step": 107
},
{
"epoch": 0.9874285714285714,
"grad_norm": 0.47822137614162397,
"learning_rate": 4.047488540616503e-06,
"loss": 0.6905,
"num_tokens": 45448868.0,
"step": 108
},
{
"epoch": 0.9965714285714286,
"grad_norm": 0.5267943756779967,
"learning_rate": 4.027703336474979e-06,
"loss": 0.6913,
"num_tokens": 45865085.0,
"step": 109
},
{
"epoch": 1.0,
"grad_norm": 0.7513864322987849,
"learning_rate": 4.0077642295581605e-06,
"loss": 0.6751,
"num_tokens": 46043993.0,
"step": 110
},
{
"epoch": 1.0091428571428571,
"grad_norm": 0.652397229541187,
"learning_rate": 3.987673228557017e-06,
"loss": 0.6124,
"num_tokens": 46444743.0,
"step": 111
},
{
"epoch": 1.0182857142857142,
"grad_norm": 0.5613822445181739,
"learning_rate": 3.967432357464518e-06,
"loss": 0.6334,
"num_tokens": 46868932.0,
"step": 112
},
{
"epoch": 1.0274285714285714,
"grad_norm": 0.548463499167073,
"learning_rate": 3.947043655371734e-06,
"loss": 0.637,
"num_tokens": 47306605.0,
"step": 113
},
{
"epoch": 1.0365714285714285,
"grad_norm": 0.5985099049112745,
"learning_rate": 3.9265091762624225e-06,
"loss": 0.6684,
"num_tokens": 47766810.0,
"step": 114
},
{
"epoch": 1.0457142857142858,
"grad_norm": 0.5614950162690504,
"learning_rate": 3.905830988806101e-06,
"loss": 0.6386,
"num_tokens": 48217403.0,
"step": 115
},
{
"epoch": 1.054857142857143,
"grad_norm": 0.5636791310211307,
"learning_rate": 3.885011176149647e-06,
"loss": 0.6403,
"num_tokens": 48667599.0,
"step": 116
},
{
"epoch": 1.064,
"grad_norm": 0.5447491560984974,
"learning_rate": 3.864051835707444e-06,
"loss": 0.6637,
"num_tokens": 49116575.0,
"step": 117
},
{
"epoch": 1.0731428571428572,
"grad_norm": 0.564065307250827,
"learning_rate": 3.842955078950079e-06,
"loss": 0.6563,
"num_tokens": 49548979.0,
"step": 118
},
{
"epoch": 1.0822857142857143,
"grad_norm": 0.5750693750810204,
"learning_rate": 3.8217230311916365e-06,
"loss": 0.6542,
"num_tokens": 49944685.0,
"step": 119
},
{
"epoch": 1.0914285714285714,
"grad_norm": 0.5664157697505491,
"learning_rate": 3.800357831375583e-06,
"loss": 0.6311,
"num_tokens": 50369856.0,
"step": 120
},
{
"epoch": 1.1005714285714285,
"grad_norm": 0.5643319728604673,
"learning_rate": 3.778861631859298e-06,
"loss": 0.635,
"num_tokens": 50756699.0,
"step": 121
},
{
"epoch": 1.1097142857142857,
"grad_norm": 0.5490242895971412,
"learning_rate": 3.7572365981972335e-06,
"loss": 0.6504,
"num_tokens": 51212014.0,
"step": 122
},
{
"epoch": 1.1188571428571428,
"grad_norm": 0.5852924645249652,
"learning_rate": 3.735484908922759e-06,
"loss": 0.6337,
"num_tokens": 51613256.0,
"step": 123
},
{
"epoch": 1.1280000000000001,
"grad_norm": 0.524011985834964,
"learning_rate": 3.7136087553286916e-06,
"loss": 0.6244,
"num_tokens": 52060195.0,
"step": 124
},
{
"epoch": 1.1371428571428572,
"grad_norm": 0.5418140690754942,
"learning_rate": 3.6916103412465405e-06,
"loss": 0.6335,
"num_tokens": 52475517.0,
"step": 125
},
{
"epoch": 1.1462857142857144,
"grad_norm": 0.5431784069939521,
"learning_rate": 3.6694918828244923e-06,
"loss": 0.6151,
"num_tokens": 52916813.0,
"step": 126
},
{
"epoch": 1.1554285714285715,
"grad_norm": 0.5331182901674993,
"learning_rate": 3.647255608304154e-06,
"loss": 0.5944,
"num_tokens": 53329446.0,
"step": 127
},
{
"epoch": 1.1645714285714286,
"grad_norm": 0.5669880957159196,
"learning_rate": 3.6249037577960744e-06,
"loss": 0.6548,
"num_tokens": 53744113.0,
"step": 128
},
{
"epoch": 1.1737142857142857,
"grad_norm": 0.5640955386477013,
"learning_rate": 3.6024385830540758e-06,
"loss": 0.6232,
"num_tokens": 54171017.0,
"step": 129
},
{
"epoch": 1.1828571428571428,
"grad_norm": 0.566123338215568,
"learning_rate": 3.5798623472484074e-06,
"loss": 0.622,
"num_tokens": 54595667.0,
"step": 130
},
{
"epoch": 1.192,
"grad_norm": 0.5164284224002408,
"learning_rate": 3.5571773247377495e-06,
"loss": 0.6247,
"num_tokens": 55038242.0,
"step": 131
},
{
"epoch": 1.201142857142857,
"grad_norm": 0.5193063226224068,
"learning_rate": 3.5343858008400955e-06,
"loss": 0.6133,
"num_tokens": 55441117.0,
"step": 132
},
{
"epoch": 1.2102857142857144,
"grad_norm": 0.5594859010728502,
"learning_rate": 3.511490071602523e-06,
"loss": 0.5845,
"num_tokens": 55852545.0,
"step": 133
},
{
"epoch": 1.2194285714285713,
"grad_norm": 0.5312154621480816,
"learning_rate": 3.4884924435698875e-06,
"loss": 0.6139,
"num_tokens": 56305302.0,
"step": 134
},
{
"epoch": 1.2285714285714286,
"grad_norm": 0.5082891383546045,
"learning_rate": 3.465395233552458e-06,
"loss": 0.6162,
"num_tokens": 56746085.0,
"step": 135
},
{
"epoch": 1.2377142857142858,
"grad_norm": 0.5183563749809662,
"learning_rate": 3.4422007683925224e-06,
"loss": 0.5981,
"num_tokens": 57164232.0,
"step": 136
},
{
"epoch": 1.2468571428571429,
"grad_norm": 0.5424921739894077,
"learning_rate": 3.418911384729971e-06,
"loss": 0.6318,
"num_tokens": 57569040.0,
"step": 137
},
{
"epoch": 1.256,
"grad_norm": 0.5176334342259454,
"learning_rate": 3.395529428766907e-06,
"loss": 0.6473,
"num_tokens": 58021968.0,
"step": 138
},
{
"epoch": 1.2651428571428571,
"grad_norm": 0.5084583167785991,
"learning_rate": 3.3720572560312854e-06,
"loss": 0.6046,
"num_tokens": 58462943.0,
"step": 139
},
{
"epoch": 1.2742857142857142,
"grad_norm": 0.5103522984652497,
"learning_rate": 3.3484972311396114e-06,
"loss": 0.5896,
"num_tokens": 58899697.0,
"step": 140
},
{
"epoch": 1.2834285714285714,
"grad_norm": 0.5357080439117383,
"learning_rate": 3.3248517275587292e-06,
"loss": 0.6151,
"num_tokens": 59317087.0,
"step": 141
},
{
"epoch": 1.2925714285714287,
"grad_norm": 0.5266818795859725,
"learning_rate": 3.3011231273667155e-06,
"loss": 0.6402,
"num_tokens": 59740685.0,
"step": 142
},
{
"epoch": 1.3017142857142856,
"grad_norm": 0.5515879967156198,
"learning_rate": 3.2773138210129037e-06,
"loss": 0.6256,
"num_tokens": 60175007.0,
"step": 143
},
{
"epoch": 1.310857142857143,
"grad_norm": 0.5231964139544342,
"learning_rate": 3.253426207077069e-06,
"loss": 0.6185,
"num_tokens": 60569710.0,
"step": 144
},
{
"epoch": 1.32,
"grad_norm": 0.5394300392957713,
"learning_rate": 3.2294626920277928e-06,
"loss": 0.6372,
"num_tokens": 60995749.0,
"step": 145
},
{
"epoch": 1.3291428571428572,
"grad_norm": 0.5288477560850162,
"learning_rate": 3.20542568998003e-06,
"loss": 0.6168,
"num_tokens": 61416757.0,
"step": 146
},
{
"epoch": 1.3382857142857143,
"grad_norm": 0.5412372637528483,
"learning_rate": 3.181317622451909e-06,
"loss": 0.621,
"num_tokens": 61838293.0,
"step": 147
},
{
"epoch": 1.3474285714285714,
"grad_norm": 0.5408790490021942,
"learning_rate": 3.1571409181207867e-06,
"loss": 0.6284,
"num_tokens": 62266608.0,
"step": 148
},
{
"epoch": 1.3565714285714285,
"grad_norm": 0.5449668003305271,
"learning_rate": 3.132898012578577e-06,
"loss": 0.6086,
"num_tokens": 62661670.0,
"step": 149
},
{
"epoch": 1.3657142857142857,
"grad_norm": 0.49032514112728875,
"learning_rate": 3.108591348086388e-06,
"loss": 0.626,
"num_tokens": 63105118.0,
"step": 150
},
{
"epoch": 1.3748571428571428,
"grad_norm": 0.532715786721113,
"learning_rate": 3.0842233733284866e-06,
"loss": 0.6034,
"num_tokens": 63547019.0,
"step": 151
},
{
"epoch": 1.384,
"grad_norm": 0.5606001997788402,
"learning_rate": 3.0597965431656125e-06,
"loss": 0.6008,
"num_tokens": 63969387.0,
"step": 152
},
{
"epoch": 1.3931428571428572,
"grad_norm": 0.5496057628803153,
"learning_rate": 3.0353133183876745e-06,
"loss": 0.627,
"num_tokens": 64374671.0,
"step": 153
},
{
"epoch": 1.4022857142857144,
"grad_norm": 0.5524369124857681,
"learning_rate": 3.0107761654658464e-06,
"loss": 0.605,
"num_tokens": 64756768.0,
"step": 154
},
{
"epoch": 1.4114285714285715,
"grad_norm": 0.5533113651256814,
"learning_rate": 2.986187556304091e-06,
"loss": 0.6181,
"num_tokens": 65142352.0,
"step": 155
},
{
"epoch": 1.4205714285714286,
"grad_norm": 0.5394567163244798,
"learning_rate": 2.961549967990139e-06,
"loss": 0.6582,
"num_tokens": 65566229.0,
"step": 156
},
{
"epoch": 1.4297142857142857,
"grad_norm": 0.5320474412403765,
"learning_rate": 2.9368658825459452e-06,
"loss": 0.6387,
"num_tokens": 66047263.0,
"step": 157
},
{
"epoch": 1.4388571428571428,
"grad_norm": 0.498957441050946,
"learning_rate": 2.912137786677639e-06,
"loss": 0.6336,
"num_tokens": 66501887.0,
"step": 158
},
{
"epoch": 1.448,
"grad_norm": 0.5480701615542537,
"learning_rate": 2.8873681715250197e-06,
"loss": 0.6382,
"num_tokens": 66899885.0,
"step": 159
},
{
"epoch": 1.457142857142857,
"grad_norm": 0.5270413445410719,
"learning_rate": 2.8625595324105925e-06,
"loss": 0.5933,
"num_tokens": 67307708.0,
"step": 160
},
{
"epoch": 1.4662857142857142,
"grad_norm": 0.5087274802990532,
"learning_rate": 2.8377143685881835e-06,
"loss": 0.5981,
"num_tokens": 67732755.0,
"step": 161
},
{
"epoch": 1.4754285714285715,
"grad_norm": 0.5584833225495552,
"learning_rate": 2.812835182991166e-06,
"loss": 0.6151,
"num_tokens": 68109847.0,
"step": 162
},
{
"epoch": 1.4845714285714287,
"grad_norm": 0.5337038825179844,
"learning_rate": 2.7879244819803104e-06,
"loss": 0.5916,
"num_tokens": 68557231.0,
"step": 163
},
{
"epoch": 1.4937142857142858,
"grad_norm": 0.5046455371902981,
"learning_rate": 2.7629847750912885e-06,
"loss": 0.6266,
"num_tokens": 68974066.0,
"step": 164
},
{
"epoch": 1.502857142857143,
"grad_norm": 0.5388161616170928,
"learning_rate": 2.7380185747818628e-06,
"loss": 0.6317,
"num_tokens": 69374302.0,
"step": 165
},
{
"epoch": 1.512,
"grad_norm": 0.5230970281548867,
"learning_rate": 2.713028396178776e-06,
"loss": 0.6587,
"num_tokens": 69819837.0,
"step": 166
},
{
"epoch": 1.5211428571428571,
"grad_norm": 0.526880337190175,
"learning_rate": 2.6880167568243716e-06,
"loss": 0.5943,
"num_tokens": 70216440.0,
"step": 167
},
{
"epoch": 1.5302857142857142,
"grad_norm": 0.5485816175958096,
"learning_rate": 2.6629861764229824e-06,
"loss": 0.6055,
"num_tokens": 70624223.0,
"step": 168
},
{
"epoch": 1.5394285714285716,
"grad_norm": 0.5305674288577678,
"learning_rate": 2.6379391765870828e-06,
"loss": 0.61,
"num_tokens": 71020172.0,
"step": 169
},
{
"epoch": 1.5485714285714285,
"grad_norm": 0.5500147924060435,
"learning_rate": 2.6128782805832605e-06,
"loss": 0.6316,
"num_tokens": 71443243.0,
"step": 170
},
{
"epoch": 1.5577142857142858,
"grad_norm": 0.545744633264797,
"learning_rate": 2.5878060130780225e-06,
"loss": 0.6343,
"num_tokens": 71842978.0,
"step": 171
},
{
"epoch": 1.5668571428571427,
"grad_norm": 0.5333068371067002,
"learning_rate": 2.562724899883458e-06,
"loss": 0.5962,
"num_tokens": 72246613.0,
"step": 172
},
{
"epoch": 1.576,
"grad_norm": 0.5151113808836872,
"learning_rate": 2.537637467702777e-06,
"loss": 0.5942,
"num_tokens": 72635238.0,
"step": 173
},
{
"epoch": 1.5851428571428572,
"grad_norm": 0.5097827368822562,
"learning_rate": 2.512546243875776e-06,
"loss": 0.6452,
"num_tokens": 73066900.0,
"step": 174
},
{
"epoch": 1.5942857142857143,
"grad_norm": 0.5050837954931957,
"learning_rate": 2.4874537561242253e-06,
"loss": 0.5983,
"num_tokens": 73459425.0,
"step": 175
},
{
"epoch": 1.6034285714285714,
"grad_norm": 0.5386528093096086,
"learning_rate": 2.462362532297224e-06,
"loss": 0.6212,
"num_tokens": 73846769.0,
"step": 176
},
{
"epoch": 1.6125714285714285,
"grad_norm": 0.5208969637241072,
"learning_rate": 2.4372751001165427e-06,
"loss": 0.5953,
"num_tokens": 74238603.0,
"step": 177
},
{
"epoch": 1.6217142857142857,
"grad_norm": 0.5300750981583515,
"learning_rate": 2.4121939869219784e-06,
"loss": 0.6385,
"num_tokens": 74619800.0,
"step": 178
},
{
"epoch": 1.6308571428571428,
"grad_norm": 0.4833782673351415,
"learning_rate": 2.3871217194167407e-06,
"loss": 0.6203,
"num_tokens": 75068223.0,
"step": 179
},
{
"epoch": 1.6400000000000001,
"grad_norm": 0.4940975179707354,
"learning_rate": 2.362060823412919e-06,
"loss": 0.6251,
"num_tokens": 75515755.0,
"step": 180
},
{
"epoch": 1.649142857142857,
"grad_norm": 0.4884850267837385,
"learning_rate": 2.3370138235770184e-06,
"loss": 0.5753,
"num_tokens": 75968696.0,
"step": 181
},
{
"epoch": 1.6582857142857144,
"grad_norm": 0.5130965951519948,
"learning_rate": 2.3119832431756284e-06,
"loss": 0.6274,
"num_tokens": 76374172.0,
"step": 182
},
{
"epoch": 1.6674285714285715,
"grad_norm": 0.5311768193897066,
"learning_rate": 2.286971603821226e-06,
"loss": 0.5945,
"num_tokens": 76766397.0,
"step": 183
},
{
"epoch": 1.6765714285714286,
"grad_norm": 0.4881821303904307,
"learning_rate": 2.261981425218138e-06,
"loss": 0.5932,
"num_tokens": 77197026.0,
"step": 184
},
{
"epoch": 1.6857142857142857,
"grad_norm": 0.47122072556306027,
"learning_rate": 2.2370152249087114e-06,
"loss": 0.58,
"num_tokens": 77665939.0,
"step": 185
},
{
"epoch": 1.6948571428571428,
"grad_norm": 0.5385304392622484,
"learning_rate": 2.2120755180196904e-06,
"loss": 0.6206,
"num_tokens": 78106107.0,
"step": 186
},
{
"epoch": 1.704,
"grad_norm": 0.49447572000428314,
"learning_rate": 2.1871648170088347e-06,
"loss": 0.5907,
"num_tokens": 78538377.0,
"step": 187
},
{
"epoch": 1.713142857142857,
"grad_norm": 0.5244933755348707,
"learning_rate": 2.1622856314118178e-06,
"loss": 0.5959,
"num_tokens": 78944694.0,
"step": 188
},
{
"epoch": 1.7222857142857144,
"grad_norm": 0.5301458412434723,
"learning_rate": 2.1374404675894083e-06,
"loss": 0.6361,
"num_tokens": 79395440.0,
"step": 189
},
{
"epoch": 1.7314285714285713,
"grad_norm": 0.5202435914893955,
"learning_rate": 2.1126318284749807e-06,
"loss": 0.6127,
"num_tokens": 79822802.0,
"step": 190
},
{
"epoch": 1.7405714285714287,
"grad_norm": 0.48495792096652884,
"learning_rate": 2.087862213322362e-06,
"loss": 0.6216,
"num_tokens": 80251827.0,
"step": 191
},
{
"epoch": 1.7497142857142856,
"grad_norm": 0.5039270578694471,
"learning_rate": 2.063134117454055e-06,
"loss": 0.6238,
"num_tokens": 80682166.0,
"step": 192
},
{
"epoch": 1.758857142857143,
"grad_norm": 0.5606686536962094,
"learning_rate": 2.0384500320098604e-06,
"loss": 0.6327,
"num_tokens": 81072109.0,
"step": 193
},
{
"epoch": 1.768,
"grad_norm": 0.5154032810828468,
"learning_rate": 2.01381244369591e-06,
"loss": 0.5878,
"num_tokens": 81463643.0,
"step": 194
},
{
"epoch": 1.7771428571428571,
"grad_norm": 0.5372934038703727,
"learning_rate": 1.9892238345341544e-06,
"loss": 0.6254,
"num_tokens": 81900194.0,
"step": 195
},
{
"epoch": 1.7862857142857143,
"grad_norm": 0.5372941594212604,
"learning_rate": 1.964686681612327e-06,
"loss": 0.6012,
"num_tokens": 82259830.0,
"step": 196
},
{
"epoch": 1.7954285714285714,
"grad_norm": 0.5074393400323167,
"learning_rate": 1.9402034568343888e-06,
"loss": 0.5949,
"num_tokens": 82667552.0,
"step": 197
},
{
"epoch": 1.8045714285714287,
"grad_norm": 0.5361364809103955,
"learning_rate": 1.9157766266715142e-06,
"loss": 0.6128,
"num_tokens": 83029946.0,
"step": 198
},
{
"epoch": 1.8137142857142856,
"grad_norm": 0.5028599124924883,
"learning_rate": 1.8914086519136133e-06,
"loss": 0.6024,
"num_tokens": 83437794.0,
"step": 199
},
{
"epoch": 1.822857142857143,
"grad_norm": 0.4915157876191495,
"learning_rate": 1.8671019874214237e-06,
"loss": 0.6247,
"num_tokens": 83857268.0,
"step": 200
},
{
"epoch": 1.8319999999999999,
"grad_norm": 0.4795616878883829,
"learning_rate": 1.8428590818792135e-06,
"loss": 0.597,
"num_tokens": 84283309.0,
"step": 201
},
{
"epoch": 1.8411428571428572,
"grad_norm": 0.4774342459205278,
"learning_rate": 1.8186823775480917e-06,
"loss": 0.6228,
"num_tokens": 84738929.0,
"step": 202
},
{
"epoch": 1.8502857142857143,
"grad_norm": 0.4943608455779546,
"learning_rate": 1.7945743100199706e-06,
"loss": 0.6322,
"num_tokens": 85153992.0,
"step": 203
},
{
"epoch": 1.8594285714285714,
"grad_norm": 0.4713560174328036,
"learning_rate": 1.7705373079722083e-06,
"loss": 0.642,
"num_tokens": 85623688.0,
"step": 204
},
{
"epoch": 1.8685714285714285,
"grad_norm": 0.5141695873278811,
"learning_rate": 1.7465737929229317e-06,
"loss": 0.6091,
"num_tokens": 86006323.0,
"step": 205
},
{
"epoch": 1.8777142857142857,
"grad_norm": 0.5000534330821961,
"learning_rate": 1.722686178987097e-06,
"loss": 0.6102,
"num_tokens": 86404792.0,
"step": 206
},
{
"epoch": 1.886857142857143,
"grad_norm": 0.5025822929123239,
"learning_rate": 1.6988768726332856e-06,
"loss": 0.6379,
"num_tokens": 86854752.0,
"step": 207
},
{
"epoch": 1.896,
"grad_norm": 0.49088754619114366,
"learning_rate": 1.6751482724412716e-06,
"loss": 0.6353,
"num_tokens": 87282488.0,
"step": 208
},
{
"epoch": 1.9051428571428572,
"grad_norm": 0.5251674366397446,
"learning_rate": 1.651502768860389e-06,
"loss": 0.5973,
"num_tokens": 87669853.0,
"step": 209
},
{
"epoch": 1.9142857142857141,
"grad_norm": 0.4689490751639795,
"learning_rate": 1.6279427439687154e-06,
"loss": 0.6304,
"num_tokens": 88130980.0,
"step": 210
},
{
"epoch": 1.9234285714285715,
"grad_norm": 0.472058082568105,
"learning_rate": 1.6044705712330932e-06,
"loss": 0.5638,
"num_tokens": 88570357.0,
"step": 211
},
{
"epoch": 1.9325714285714286,
"grad_norm": 0.4955095804093834,
"learning_rate": 1.5810886152700302e-06,
"loss": 0.6078,
"num_tokens": 88981662.0,
"step": 212
},
{
"epoch": 1.9417142857142857,
"grad_norm": 0.502372210861262,
"learning_rate": 1.5577992316074783e-06,
"loss": 0.6023,
"num_tokens": 89372451.0,
"step": 213
},
{
"epoch": 1.9508571428571428,
"grad_norm": 0.5079308074061759,
"learning_rate": 1.5346047664475422e-06,
"loss": 0.6403,
"num_tokens": 89802073.0,
"step": 214
},
{
"epoch": 1.96,
"grad_norm": 0.4700590581768767,
"learning_rate": 1.511507556430114e-06,
"loss": 0.628,
"num_tokens": 90260756.0,
"step": 215
},
{
"epoch": 1.9691428571428573,
"grad_norm": 0.50339097459113,
"learning_rate": 1.4885099283974774e-06,
"loss": 0.6428,
"num_tokens": 90666132.0,
"step": 216
},
{
"epoch": 1.9782857142857142,
"grad_norm": 0.514562609979602,
"learning_rate": 1.465614199159905e-06,
"loss": 0.6365,
"num_tokens": 91063409.0,
"step": 217
},
{
"epoch": 1.9874285714285715,
"grad_norm": 0.48835023162219804,
"learning_rate": 1.4428226752622509e-06,
"loss": 0.5954,
"num_tokens": 91473599.0,
"step": 218
},
{
"epoch": 1.9965714285714284,
"grad_norm": 0.47655828723147114,
"learning_rate": 1.420137652751593e-06,
"loss": 0.5973,
"num_tokens": 91933410.0,
"step": 219
},
{
"epoch": 2.0,
"grad_norm": 0.789231462279532,
"learning_rate": 1.3975614169459253e-06,
"loss": 0.6301,
"num_tokens": 92090067.0,
"step": 220
},
{
"epoch": 2.0091428571428573,
"grad_norm": 0.5711191577315374,
"learning_rate": 1.3750962422039269e-06,
"loss": 0.6032,
"num_tokens": 92546736.0,
"step": 221
},
{
"epoch": 2.0182857142857142,
"grad_norm": 0.5374729825328671,
"learning_rate": 1.3527443916958466e-06,
"loss": 0.5982,
"num_tokens": 92984957.0,
"step": 222
},
{
"epoch": 2.0274285714285716,
"grad_norm": 0.5318016866757004,
"learning_rate": 1.3305081171755092e-06,
"loss": 0.58,
"num_tokens": 93430346.0,
"step": 223
},
{
"epoch": 2.0365714285714285,
"grad_norm": 0.5378310465822619,
"learning_rate": 1.3083896587534606e-06,
"loss": 0.572,
"num_tokens": 93859357.0,
"step": 224
},
{
"epoch": 2.045714285714286,
"grad_norm": 0.5902220930968932,
"learning_rate": 1.2863912446713084e-06,
"loss": 0.5815,
"num_tokens": 94235216.0,
"step": 225
},
{
"epoch": 2.0548571428571427,
"grad_norm": 0.5405043387197087,
"learning_rate": 1.2645150910772413e-06,
"loss": 0.5617,
"num_tokens": 94651501.0,
"step": 226
},
{
"epoch": 2.064,
"grad_norm": 0.5385666206793902,
"learning_rate": 1.2427634018027673e-06,
"loss": 0.5644,
"num_tokens": 95069945.0,
"step": 227
},
{
"epoch": 2.073142857142857,
"grad_norm": 0.4997592766482857,
"learning_rate": 1.2211383681407022e-06,
"loss": 0.5664,
"num_tokens": 95524927.0,
"step": 228
},
{
"epoch": 2.0822857142857143,
"grad_norm": 0.5529900645674626,
"learning_rate": 1.1996421686244179e-06,
"loss": 0.5776,
"num_tokens": 95918641.0,
"step": 229
},
{
"epoch": 2.0914285714285716,
"grad_norm": 0.5415131513338728,
"learning_rate": 1.1782769688083647e-06,
"loss": 0.5786,
"num_tokens": 96308405.0,
"step": 230
},
{
"epoch": 2.1005714285714285,
"grad_norm": 0.5354002873656379,
"learning_rate": 1.1570449210499213e-06,
"loss": 0.5613,
"num_tokens": 96732646.0,
"step": 231
},
{
"epoch": 2.109714285714286,
"grad_norm": 0.5430380283286926,
"learning_rate": 1.135948164292557e-06,
"loss": 0.542,
"num_tokens": 97156843.0,
"step": 232
},
{
"epoch": 2.1188571428571428,
"grad_norm": 0.5301770327914718,
"learning_rate": 1.1149888238503537e-06,
"loss": 0.5774,
"num_tokens": 97606726.0,
"step": 233
},
{
"epoch": 2.128,
"grad_norm": 0.5525594315362994,
"learning_rate": 1.0941690111939002e-06,
"loss": 0.5566,
"num_tokens": 97986735.0,
"step": 234
},
{
"epoch": 2.137142857142857,
"grad_norm": 0.4995496823018478,
"learning_rate": 1.0734908237375783e-06,
"loss": 0.5766,
"num_tokens": 98468501.0,
"step": 235
},
{
"epoch": 2.1462857142857144,
"grad_norm": 0.5378604882040718,
"learning_rate": 1.0529563446282665e-06,
"loss": 0.5632,
"num_tokens": 98903725.0,
"step": 236
},
{
"epoch": 2.1554285714285712,
"grad_norm": 0.5207373128952933,
"learning_rate": 1.0325676425354828e-06,
"loss": 0.5636,
"num_tokens": 99331228.0,
"step": 237
},
{
"epoch": 2.1645714285714286,
"grad_norm": 0.5256645631693067,
"learning_rate": 1.0123267714429826e-06,
"loss": 0.5922,
"num_tokens": 99763837.0,
"step": 238
},
{
"epoch": 2.1737142857142855,
"grad_norm": 0.5321491484630023,
"learning_rate": 9.922357704418394e-07,
"loss": 0.5849,
"num_tokens": 100178817.0,
"step": 239
},
{
"epoch": 2.182857142857143,
"grad_norm": 0.5138997122471298,
"learning_rate": 9.722966635250222e-07,
"loss": 0.5401,
"num_tokens": 100610730.0,
"step": 240
},
{
"epoch": 2.192,
"grad_norm": 0.4943367780588382,
"learning_rate": 9.525114593834975e-07,
"loss": 0.5851,
"num_tokens": 101068964.0,
"step": 241
},
{
"epoch": 2.201142857142857,
"grad_norm": 0.5154577499927364,
"learning_rate": 9.328821512038716e-07,
"loss": 0.5884,
"num_tokens": 101487081.0,
"step": 242
},
{
"epoch": 2.2102857142857144,
"grad_norm": 0.5222259240916006,
"learning_rate": 9.134107164675898e-07,
"loss": 0.5801,
"num_tokens": 101904000.0,
"step": 243
},
{
"epoch": 2.2194285714285713,
"grad_norm": 0.5259169693667993,
"learning_rate": 8.940991167517313e-07,
"loss": 0.5807,
"num_tokens": 102307191.0,
"step": 244
},
{
"epoch": 2.2285714285714286,
"grad_norm": 0.5206118106771275,
"learning_rate": 8.749492975313897e-07,
"loss": 0.5614,
"num_tokens": 102716603.0,
"step": 245
},
{
"epoch": 2.2377142857142855,
"grad_norm": 0.5219924329022366,
"learning_rate": 8.559631879836838e-07,
"loss": 0.584,
"num_tokens": 103129732.0,
"step": 246
},
{
"epoch": 2.246857142857143,
"grad_norm": 0.4964769891153005,
"learning_rate": 8.371427007934174e-07,
"loss": 0.5742,
"num_tokens": 103552339.0,
"step": 247
},
{
"epoch": 2.2560000000000002,
"grad_norm": 0.507390544236327,
"learning_rate": 8.184897319603813e-07,
"loss": 0.6142,
"num_tokens": 103987991.0,
"step": 248
},
{
"epoch": 2.265142857142857,
"grad_norm": 0.4989116246504005,
"learning_rate": 8.000061606083579e-07,
"loss": 0.6119,
"num_tokens": 104429673.0,
"step": 249
},
{
"epoch": 2.2742857142857145,
"grad_norm": 0.5098053191178704,
"learning_rate": 7.816938487958131e-07,
"loss": 0.5546,
"num_tokens": 104841554.0,
"step": 250
},
{
"epoch": 2.2834285714285714,
"grad_norm": 0.5089115422799331,
"learning_rate": 7.635546413283054e-07,
"loss": 0.5832,
"num_tokens": 105253055.0,
"step": 251
},
{
"epoch": 2.2925714285714287,
"grad_norm": 0.5348285772467526,
"learning_rate": 7.455903655726437e-07,
"loss": 0.5262,
"num_tokens": 105614395.0,
"step": 252
},
{
"epoch": 2.3017142857142856,
"grad_norm": 0.5320710749897903,
"learning_rate": 7.278028312727961e-07,
"loss": 0.5374,
"num_tokens": 105984595.0,
"step": 253
},
{
"epoch": 2.310857142857143,
"grad_norm": 0.5015290198043492,
"learning_rate": 7.101938303675674e-07,
"loss": 0.5674,
"num_tokens": 106417649.0,
"step": 254
},
{
"epoch": 2.32,
"grad_norm": 0.5164124073124037,
"learning_rate": 6.927651368100843e-07,
"loss": 0.5702,
"num_tokens": 106818638.0,
"step": 255
},
{
"epoch": 2.329142857142857,
"grad_norm": 0.4846215779170027,
"learning_rate": 6.755185063890818e-07,
"loss": 0.5509,
"num_tokens": 107258625.0,
"step": 256
},
{
"epoch": 2.338285714285714,
"grad_norm": 0.5097907143887082,
"learning_rate": 6.584556765520231e-07,
"loss": 0.5798,
"num_tokens": 107680389.0,
"step": 257
},
{
"epoch": 2.3474285714285714,
"grad_norm": 0.5068807711002643,
"learning_rate": 6.415783662300662e-07,
"loss": 0.5877,
"num_tokens": 108115625.0,
"step": 258
},
{
"epoch": 2.3565714285714288,
"grad_norm": 0.5179162356392562,
"learning_rate": 6.248882756648988e-07,
"loss": 0.5623,
"num_tokens": 108526100.0,
"step": 259
},
{
"epoch": 2.3657142857142857,
"grad_norm": 0.5074768216794914,
"learning_rate": 6.083870862374513e-07,
"loss": 0.5701,
"num_tokens": 108943744.0,
"step": 260
},
{
"epoch": 2.374857142857143,
"grad_norm": 0.4954184591704928,
"learning_rate": 5.920764602985141e-07,
"loss": 0.562,
"num_tokens": 109381379.0,
"step": 261
},
{
"epoch": 2.384,
"grad_norm": 0.5147752391401487,
"learning_rate": 5.759580410012691e-07,
"loss": 0.565,
"num_tokens": 109793247.0,
"step": 262
},
{
"epoch": 2.3931428571428572,
"grad_norm": 0.5253288675199733,
"learning_rate": 5.600334521357581e-07,
"loss": 0.595,
"num_tokens": 110202872.0,
"step": 263
},
{
"epoch": 2.402285714285714,
"grad_norm": 0.48483979120150505,
"learning_rate": 5.443042979652957e-07,
"loss": 0.5725,
"num_tokens": 110639527.0,
"step": 264
},
{
"epoch": 2.4114285714285715,
"grad_norm": 0.5039540186447794,
"learning_rate": 5.287721630648615e-07,
"loss": 0.5826,
"num_tokens": 111073504.0,
"step": 265
},
{
"epoch": 2.420571428571429,
"grad_norm": 0.5283553665176294,
"learning_rate": 5.134386121614615e-07,
"loss": 0.5598,
"num_tokens": 111473527.0,
"step": 266
},
{
"epoch": 2.4297142857142857,
"grad_norm": 0.5021929773072068,
"learning_rate": 4.983051899764946e-07,
"loss": 0.5467,
"num_tokens": 111888096.0,
"step": 267
},
{
"epoch": 2.4388571428571426,
"grad_norm": 0.5086106631732937,
"learning_rate": 4.833734210701435e-07,
"loss": 0.5653,
"num_tokens": 112274040.0,
"step": 268
},
{
"epoch": 2.448,
"grad_norm": 0.5070750574100812,
"learning_rate": 4.6864480968778103e-07,
"loss": 0.5854,
"num_tokens": 112701254.0,
"step": 269
},
{
"epoch": 2.4571428571428573,
"grad_norm": 0.5389372884949655,
"learning_rate": 4.541208396084304e-07,
"loss": 0.569,
"num_tokens": 113084811.0,
"step": 270
},
{
"epoch": 2.466285714285714,
"grad_norm": 0.4954996531070081,
"learning_rate": 4.39802973995295e-07,
"loss": 0.5931,
"num_tokens": 113515668.0,
"step": 271
},
{
"epoch": 2.4754285714285715,
"grad_norm": 0.5335874562537316,
"learning_rate": 4.2569265524834756e-07,
"loss": 0.5588,
"num_tokens": 113895133.0,
"step": 272
},
{
"epoch": 2.4845714285714284,
"grad_norm": 0.4974120943217034,
"learning_rate": 4.117913048590283e-07,
"loss": 0.5723,
"num_tokens": 114325228.0,
"step": 273
},
{
"epoch": 2.4937142857142858,
"grad_norm": 0.47465781973096216,
"learning_rate": 3.9810032326704106e-07,
"loss": 0.5359,
"num_tokens": 114773958.0,
"step": 274
},
{
"epoch": 2.5028571428571427,
"grad_norm": 0.4954990612858109,
"learning_rate": 3.8462108971926564e-07,
"loss": 0.5797,
"num_tokens": 115212487.0,
"step": 275
},
{
"epoch": 2.512,
"grad_norm": 0.4968181717935684,
"learning_rate": 3.713549621308174e-07,
"loss": 0.5942,
"num_tokens": 115622910.0,
"step": 276
},
{
"epoch": 2.5211428571428574,
"grad_norm": 0.5082368933542878,
"learning_rate": 3.5830327694824777e-07,
"loss": 0.5652,
"num_tokens": 116027725.0,
"step": 277
},
{
"epoch": 2.5302857142857142,
"grad_norm": 0.4866800840052398,
"learning_rate": 3.4546734901490466e-07,
"loss": 0.5783,
"num_tokens": 116475926.0,
"step": 278
},
{
"epoch": 2.5394285714285716,
"grad_norm": 0.5091530695044061,
"learning_rate": 3.3284847143847834e-07,
"loss": 0.5903,
"num_tokens": 116890461.0,
"step": 279
},
{
"epoch": 2.5485714285714285,
"grad_norm": 0.5115099249876448,
"learning_rate": 3.2044791546072985e-07,
"loss": 0.5618,
"num_tokens": 117282195.0,
"step": 280
},
{
"epoch": 2.557714285714286,
"grad_norm": 0.5050583956238427,
"learning_rate": 3.0826693032942586e-07,
"loss": 0.5711,
"num_tokens": 117710503.0,
"step": 281
},
{
"epoch": 2.5668571428571427,
"grad_norm": 0.5374803213347745,
"learning_rate": 2.963067431724856e-07,
"loss": 0.5629,
"num_tokens": 118081184.0,
"step": 282
},
{
"epoch": 2.576,
"grad_norm": 0.4984343530594435,
"learning_rate": 2.8456855887436074e-07,
"loss": 0.5765,
"num_tokens": 118489510.0,
"step": 283
},
{
"epoch": 2.5851428571428574,
"grad_norm": 0.5090570265753169,
"learning_rate": 2.730535599546524e-07,
"loss": 0.5999,
"num_tokens": 118876624.0,
"step": 284
},
{
"epoch": 2.5942857142857143,
"grad_norm": 0.519781544356009,
"learning_rate": 2.617629064489838e-07,
"loss": 0.6156,
"num_tokens": 119323858.0,
"step": 285
},
{
"epoch": 2.603428571428571,
"grad_norm": 0.48368780311253534,
"learning_rate": 2.50697735792135e-07,
"loss": 0.5865,
"num_tokens": 119746235.0,
"step": 286
},
{
"epoch": 2.6125714285714285,
"grad_norm": 0.6677870618002503,
"learning_rate": 2.398591627034588e-07,
"loss": 0.5738,
"num_tokens": 120176419.0,
"step": 287
},
{
"epoch": 2.621714285714286,
"grad_norm": 0.5161420445880435,
"learning_rate": 2.2924827907457841e-07,
"loss": 0.602,
"num_tokens": 120585061.0,
"step": 288
},
{
"epoch": 2.630857142857143,
"grad_norm": 0.4953371088579667,
"learning_rate": 2.1886615385939502e-07,
"loss": 0.5847,
"num_tokens": 121022910.0,
"step": 289
},
{
"epoch": 2.64,
"grad_norm": 0.4700637789497405,
"learning_rate": 2.0871383296639487e-07,
"loss": 0.574,
"num_tokens": 121496265.0,
"step": 290
},
{
"epoch": 2.649142857142857,
"grad_norm": 0.48662101727536294,
"learning_rate": 1.9879233915328312e-07,
"loss": 0.5765,
"num_tokens": 121936055.0,
"step": 291
},
{
"epoch": 2.6582857142857144,
"grad_norm": 0.5099077987670947,
"learning_rate": 1.891026719239547e-07,
"loss": 0.5599,
"num_tokens": 122342895.0,
"step": 292
},
{
"epoch": 2.6674285714285713,
"grad_norm": 0.4938591026159351,
"learning_rate": 1.7964580742779847e-07,
"loss": 0.6128,
"num_tokens": 122770275.0,
"step": 293
},
{
"epoch": 2.6765714285714286,
"grad_norm": 0.4870135117164128,
"learning_rate": 1.7042269836135882e-07,
"loss": 0.5931,
"num_tokens": 123192007.0,
"step": 294
},
{
"epoch": 2.685714285714286,
"grad_norm": 0.5240800119635359,
"learning_rate": 1.6143427387236455e-07,
"loss": 0.5996,
"num_tokens": 123596083.0,
"step": 295
},
{
"epoch": 2.694857142857143,
"grad_norm": 0.5217629720972092,
"learning_rate": 1.5268143946611802e-07,
"loss": 0.605,
"num_tokens": 124000214.0,
"step": 296
},
{
"epoch": 2.7039999999999997,
"grad_norm": 0.4949853085092262,
"learning_rate": 1.441650769142791e-07,
"loss": 0.5705,
"num_tokens": 124420120.0,
"step": 297
},
{
"epoch": 2.713142857142857,
"grad_norm": 0.49569537617283155,
"learning_rate": 1.3588604416603424e-07,
"loss": 0.5722,
"num_tokens": 124837441.0,
"step": 298
},
{
"epoch": 2.7222857142857144,
"grad_norm": 0.5148571496736348,
"learning_rate": 1.278451752616608e-07,
"loss": 0.582,
"num_tokens": 125229470.0,
"step": 299
},
{
"epoch": 2.7314285714285713,
"grad_norm": 0.4755041735866527,
"learning_rate": 1.2004328024850938e-07,
"loss": 0.5543,
"num_tokens": 125662137.0,
"step": 300
},
{
"epoch": 2.7405714285714287,
"grad_norm": 0.5106445243079593,
"learning_rate": 1.1248114509939817e-07,
"loss": 0.5949,
"num_tokens": 126052227.0,
"step": 301
},
{
"epoch": 2.7497142857142856,
"grad_norm": 0.4914711633020567,
"learning_rate": 1.0515953163342973e-07,
"loss": 0.593,
"num_tokens": 126455360.0,
"step": 302
},
{
"epoch": 2.758857142857143,
"grad_norm": 0.49545896667246636,
"learning_rate": 9.807917743924838e-08,
"loss": 0.5458,
"num_tokens": 126870673.0,
"step": 303
},
{
"epoch": 2.768,
"grad_norm": 0.49575860288282114,
"learning_rate": 9.12407958007322e-08,
"loss": 0.5402,
"num_tokens": 127271445.0,
"step": 304
},
{
"epoch": 2.777142857142857,
"grad_norm": 0.4915092472121106,
"learning_rate": 8.464507562513657e-08,
"loss": 0.6131,
"num_tokens": 127712507.0,
"step": 305
},
{
"epoch": 2.7862857142857145,
"grad_norm": 0.4961962340392434,
"learning_rate": 7.829268137369311e-08,
"loss": 0.5851,
"num_tokens": 128132380.0,
"step": 306
},
{
"epoch": 2.7954285714285714,
"grad_norm": 0.4752993340814379,
"learning_rate": 7.21842529946698e-08,
"loss": 0.6129,
"num_tokens": 128588339.0,
"step": 307
},
{
"epoch": 2.8045714285714287,
"grad_norm": 0.503510989247386,
"learning_rate": 6.632040585890398e-08,
"loss": 0.5674,
"num_tokens": 129005751.0,
"step": 308
},
{
"epoch": 2.8137142857142856,
"grad_norm": 0.4724716751556215,
"learning_rate": 6.070173069780638e-08,
"loss": 0.562,
"num_tokens": 129476694.0,
"step": 309
},
{
"epoch": 2.822857142857143,
"grad_norm": 0.48385278796222503,
"learning_rate": 5.532879354385234e-08,
"loss": 0.5745,
"num_tokens": 129914320.0,
"step": 310
},
{
"epoch": 2.832,
"grad_norm": 0.48514365333284165,
"learning_rate": 5.020213567355825e-08,
"loss": 0.5667,
"num_tokens": 130364577.0,
"step": 311
},
{
"epoch": 2.841142857142857,
"grad_norm": 0.4780875564800783,
"learning_rate": 4.5322273552951265e-08,
"loss": 0.5897,
"num_tokens": 130817643.0,
"step": 312
},
{
"epoch": 2.8502857142857145,
"grad_norm": 0.4898713768327533,
"learning_rate": 4.068969878554263e-08,
"loss": 0.5874,
"num_tokens": 131252219.0,
"step": 313
},
{
"epoch": 2.8594285714285714,
"grad_norm": 0.47651774728183893,
"learning_rate": 3.630487806280086e-08,
"loss": 0.583,
"num_tokens": 131680425.0,
"step": 314
},
{
"epoch": 2.8685714285714283,
"grad_norm": 0.49334334956687725,
"learning_rate": 3.216825311713689e-08,
"loss": 0.6196,
"num_tokens": 132110282.0,
"step": 315
},
{
"epoch": 2.8777142857142857,
"grad_norm": 0.47081742859201037,
"learning_rate": 2.8280240677403813e-08,
"loss": 0.6073,
"num_tokens": 132564761.0,
"step": 316
},
{
"epoch": 2.886857142857143,
"grad_norm": 0.489336125798635,
"learning_rate": 2.464123242691574e-08,
"loss": 0.5928,
"num_tokens": 133016872.0,
"step": 317
},
{
"epoch": 2.896,
"grad_norm": 0.5047605121395512,
"learning_rate": 2.1251594963986876e-08,
"loss": 0.5916,
"num_tokens": 133419747.0,
"step": 318
},
{
"epoch": 2.9051428571428572,
"grad_norm": 0.5055723323188881,
"learning_rate": 1.8111669765003005e-08,
"loss": 0.6042,
"num_tokens": 133842060.0,
"step": 319
},
{
"epoch": 2.914285714285714,
"grad_norm": 0.5311741921831599,
"learning_rate": 1.5221773150017882e-08,
"loss": 0.5986,
"num_tokens": 134246398.0,
"step": 320
},
{
"epoch": 2.9234285714285715,
"grad_norm": 0.48458962339074657,
"learning_rate": 1.2582196250888745e-08,
"loss": 0.58,
"num_tokens": 134665906.0,
"step": 321
},
{
"epoch": 2.9325714285714284,
"grad_norm": 0.5273108133161314,
"learning_rate": 1.0193204981946426e-08,
"loss": 0.5655,
"num_tokens": 135058042.0,
"step": 322
},
{
"epoch": 2.9417142857142857,
"grad_norm": 0.4986724954854335,
"learning_rate": 8.055040013207061e-09,
"loss": 0.6096,
"num_tokens": 135459374.0,
"step": 323
},
{
"epoch": 2.950857142857143,
"grad_norm": 0.47288278261543093,
"learning_rate": 6.1679167461262124e-09,
"loss": 0.544,
"num_tokens": 135926099.0,
"step": 324
},
{
"epoch": 2.96,
"grad_norm": 0.48901998463055885,
"learning_rate": 4.53202529190011e-09,
"loss": 0.5862,
"num_tokens": 136330163.0,
"step": 325
},
{
"epoch": 2.9691428571428573,
"grad_norm": 0.4843858034119068,
"learning_rate": 3.147530452311809e-09,
"loss": 0.5486,
"num_tokens": 136750371.0,
"step": 326
},
{
"epoch": 2.978285714285714,
"grad_norm": 0.508393418386821,
"learning_rate": 2.01457170313113e-09,
"loss": 0.5906,
"num_tokens": 137146494.0,
"step": 327
},
{
"epoch": 2.9874285714285715,
"grad_norm": 0.493149265400495,
"learning_rate": 1.1332631800620164e-09,
"loss": 0.5554,
"num_tokens": 137571606.0,
"step": 328
},
{
"epoch": 2.9965714285714284,
"grad_norm": 0.5141360605578703,
"learning_rate": 5.036936672447868e-10,
"loss": 0.5281,
"num_tokens": 137971073.0,
"step": 329
},
{
"epoch": 3.0,
"grad_norm": 0.7668987334851985,
"learning_rate": 1.2592658831245274e-10,
"loss": 0.5627,
"num_tokens": 138142278.0,
"step": 330
}
],
"logging_steps": 1,
"max_steps": 330,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.200567398630162e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}