{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.994236311239193,
"eval_steps": 500,
"global_step": 230,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008645533141210375,
"grad_norm": 34.72848892211914,
"learning_rate": 5.0000000000000004e-08,
"loss": 2.477,
"step": 1
},
{
"epoch": 0.01729106628242075,
"grad_norm": 33.733909606933594,
"learning_rate": 1.0000000000000001e-07,
"loss": 2.4134,
"step": 2
},
{
"epoch": 0.025936599423631124,
"grad_norm": 34.543819427490234,
"learning_rate": 1.5000000000000002e-07,
"loss": 2.4467,
"step": 3
},
{
"epoch": 0.0345821325648415,
"grad_norm": 35.37831115722656,
"learning_rate": 2.0000000000000002e-07,
"loss": 2.4801,
"step": 4
},
{
"epoch": 0.043227665706051875,
"grad_norm": 33.97856140136719,
"learning_rate": 2.5000000000000004e-07,
"loss": 2.4422,
"step": 5
},
{
"epoch": 0.05187319884726225,
"grad_norm": 34.11160659790039,
"learning_rate": 3.0000000000000004e-07,
"loss": 2.4003,
"step": 6
},
{
"epoch": 0.06051873198847262,
"grad_norm": 34.086463928222656,
"learning_rate": 3.5000000000000004e-07,
"loss": 2.4211,
"step": 7
},
{
"epoch": 0.069164265129683,
"grad_norm": 33.96665573120117,
"learning_rate": 4.0000000000000003e-07,
"loss": 2.4054,
"step": 8
},
{
"epoch": 0.07780979827089338,
"grad_norm": 34.871307373046875,
"learning_rate": 4.5000000000000003e-07,
"loss": 2.4077,
"step": 9
},
{
"epoch": 0.08645533141210375,
"grad_norm": 33.91160583496094,
"learning_rate": 5.000000000000001e-07,
"loss": 2.3598,
"step": 10
},
{
"epoch": 0.09510086455331412,
"grad_norm": 33.40217971801758,
"learning_rate": 5.5e-07,
"loss": 2.3209,
"step": 11
},
{
"epoch": 0.1037463976945245,
"grad_norm": 33.771121978759766,
"learning_rate": 6.000000000000001e-07,
"loss": 2.3178,
"step": 12
},
{
"epoch": 0.11239193083573487,
"grad_norm": 33.577476501464844,
"learning_rate": 6.5e-07,
"loss": 2.3107,
"step": 13
},
{
"epoch": 0.12103746397694524,
"grad_norm": 31.330514907836914,
"learning_rate": 7.000000000000001e-07,
"loss": 2.095,
"step": 14
},
{
"epoch": 0.12968299711815562,
"grad_norm": 33.60646057128906,
"learning_rate": 7.5e-07,
"loss": 2.1663,
"step": 15
},
{
"epoch": 0.138328530259366,
"grad_norm": 31.96607208251953,
"learning_rate": 8.000000000000001e-07,
"loss": 1.9986,
"step": 16
},
{
"epoch": 0.14697406340057637,
"grad_norm": 32.33183288574219,
"learning_rate": 8.500000000000001e-07,
"loss": 1.9401,
"step": 17
},
{
"epoch": 0.15561959654178675,
"grad_norm": 33.50197219848633,
"learning_rate": 9.000000000000001e-07,
"loss": 1.8529,
"step": 18
},
{
"epoch": 0.1642651296829971,
"grad_norm": 33.55098342895508,
"learning_rate": 9.500000000000001e-07,
"loss": 1.7087,
"step": 19
},
{
"epoch": 0.1729106628242075,
"grad_norm": 33.90129852294922,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.6324,
"step": 20
},
{
"epoch": 0.18155619596541786,
"grad_norm": 33.738037109375,
"learning_rate": 1.0500000000000001e-06,
"loss": 1.4896,
"step": 21
},
{
"epoch": 0.19020172910662825,
"grad_norm": 33.6258659362793,
"learning_rate": 1.1e-06,
"loss": 1.3664,
"step": 22
},
{
"epoch": 0.1988472622478386,
"grad_norm": 30.822349548339844,
"learning_rate": 1.1500000000000002e-06,
"loss": 1.2193,
"step": 23
},
{
"epoch": 0.207492795389049,
"grad_norm": 29.608501434326172,
"learning_rate": 1.2000000000000002e-06,
"loss": 1.074,
"step": 24
},
{
"epoch": 0.21613832853025935,
"grad_norm": 27.651105880737305,
"learning_rate": 1.25e-06,
"loss": 0.938,
"step": 25
},
{
"epoch": 0.22478386167146974,
"grad_norm": 29.479637145996094,
"learning_rate": 1.3e-06,
"loss": 0.7728,
"step": 26
},
{
"epoch": 0.2334293948126801,
"grad_norm": 29.068634033203125,
"learning_rate": 1.3500000000000002e-06,
"loss": 0.6051,
"step": 27
},
{
"epoch": 0.2420749279538905,
"grad_norm": 24.850099563598633,
"learning_rate": 1.4000000000000001e-06,
"loss": 0.4463,
"step": 28
},
{
"epoch": 0.2507204610951009,
"grad_norm": 22.095216751098633,
"learning_rate": 1.45e-06,
"loss": 0.3489,
"step": 29
},
{
"epoch": 0.25936599423631124,
"grad_norm": 19.491201400756836,
"learning_rate": 1.5e-06,
"loss": 0.261,
"step": 30
},
{
"epoch": 0.2680115273775216,
"grad_norm": 14.492341041564941,
"learning_rate": 1.5500000000000002e-06,
"loss": 0.1707,
"step": 31
},
{
"epoch": 0.276657060518732,
"grad_norm": 6.020577907562256,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.1151,
"step": 32
},
{
"epoch": 0.28530259365994237,
"grad_norm": 2.90791916847229,
"learning_rate": 1.6500000000000003e-06,
"loss": 0.0976,
"step": 33
},
{
"epoch": 0.29394812680115273,
"grad_norm": 2.637803554534912,
"learning_rate": 1.7000000000000002e-06,
"loss": 0.0911,
"step": 34
},
{
"epoch": 0.3025936599423631,
"grad_norm": 1.804861068725586,
"learning_rate": 1.75e-06,
"loss": 0.0834,
"step": 35
},
{
"epoch": 0.3112391930835735,
"grad_norm": 2.049024820327759,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.0842,
"step": 36
},
{
"epoch": 0.31988472622478387,
"grad_norm": 1.3263498544692993,
"learning_rate": 1.85e-06,
"loss": 0.0744,
"step": 37
},
{
"epoch": 0.3285302593659942,
"grad_norm": 1.7187089920043945,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.0783,
"step": 38
},
{
"epoch": 0.3371757925072046,
"grad_norm": 1.3925131559371948,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.073,
"step": 39
},
{
"epoch": 0.345821325648415,
"grad_norm": 1.2181739807128906,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0749,
"step": 40
},
{
"epoch": 0.35446685878962536,
"grad_norm": 1.0519245862960815,
"learning_rate": 2.05e-06,
"loss": 0.0692,
"step": 41
},
{
"epoch": 0.3631123919308357,
"grad_norm": 0.9188923835754395,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.0726,
"step": 42
},
{
"epoch": 0.37175792507204614,
"grad_norm": 0.8273228406906128,
"learning_rate": 2.15e-06,
"loss": 0.0651,
"step": 43
},
{
"epoch": 0.3804034582132565,
"grad_norm": 0.9098994135856628,
"learning_rate": 2.2e-06,
"loss": 0.066,
"step": 44
},
{
"epoch": 0.38904899135446686,
"grad_norm": 0.8456838726997375,
"learning_rate": 2.25e-06,
"loss": 0.0646,
"step": 45
},
{
"epoch": 0.3976945244956772,
"grad_norm": 0.8240940570831299,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.0604,
"step": 46
},
{
"epoch": 0.40634005763688763,
"grad_norm": 1.111759901046753,
"learning_rate": 2.35e-06,
"loss": 0.0589,
"step": 47
},
{
"epoch": 0.414985590778098,
"grad_norm": 0.9933035373687744,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.0613,
"step": 48
},
{
"epoch": 0.42363112391930835,
"grad_norm": 0.7491716742515564,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.0583,
"step": 49
},
{
"epoch": 0.4322766570605187,
"grad_norm": 0.9089523553848267,
"learning_rate": 2.5e-06,
"loss": 0.0558,
"step": 50
},
{
"epoch": 0.4409221902017291,
"grad_norm": 0.7088611721992493,
"learning_rate": 2.55e-06,
"loss": 0.0553,
"step": 51
},
{
"epoch": 0.4495677233429395,
"grad_norm": 0.7892571091651917,
"learning_rate": 2.6e-06,
"loss": 0.064,
"step": 52
},
{
"epoch": 0.45821325648414984,
"grad_norm": 0.9248467087745667,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.0653,
"step": 53
},
{
"epoch": 0.4668587896253602,
"grad_norm": 0.7224969863891602,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.0611,
"step": 54
},
{
"epoch": 0.4755043227665706,
"grad_norm": 0.8231533765792847,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0617,
"step": 55
},
{
"epoch": 0.484149855907781,
"grad_norm": 0.7306967973709106,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0571,
"step": 56
},
{
"epoch": 0.49279538904899134,
"grad_norm": 0.8415323495864868,
"learning_rate": 2.85e-06,
"loss": 0.0548,
"step": 57
},
{
"epoch": 0.5014409221902018,
"grad_norm": 1.5560295581817627,
"learning_rate": 2.9e-06,
"loss": 0.0672,
"step": 58
},
{
"epoch": 0.5100864553314121,
"grad_norm": 0.9170955419540405,
"learning_rate": 2.95e-06,
"loss": 0.068,
"step": 59
},
{
"epoch": 0.5187319884726225,
"grad_norm": 0.6508005857467651,
"learning_rate": 3e-06,
"loss": 0.057,
"step": 60
},
{
"epoch": 0.5273775216138329,
"grad_norm": 0.8307355642318726,
"learning_rate": 3.05e-06,
"loss": 0.0598,
"step": 61
},
{
"epoch": 0.5360230547550432,
"grad_norm": 1.11078679561615,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.0575,
"step": 62
},
{
"epoch": 0.5446685878962536,
"grad_norm": 1.0765758752822876,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.0615,
"step": 63
},
{
"epoch": 0.553314121037464,
"grad_norm": 0.8381508588790894,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.0606,
"step": 64
},
{
"epoch": 0.5619596541786743,
"grad_norm": 1.007628321647644,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.0572,
"step": 65
},
{
"epoch": 0.5706051873198847,
"grad_norm": 0.7254197597503662,
"learning_rate": 3.3000000000000006e-06,
"loss": 0.0543,
"step": 66
},
{
"epoch": 0.579250720461095,
"grad_norm": 0.5906903147697449,
"learning_rate": 3.3500000000000005e-06,
"loss": 0.0557,
"step": 67
},
{
"epoch": 0.5878962536023055,
"grad_norm": 0.6791537404060364,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.0516,
"step": 68
},
{
"epoch": 0.5965417867435159,
"grad_norm": 0.6390945315361023,
"learning_rate": 3.45e-06,
"loss": 0.0529,
"step": 69
},
{
"epoch": 0.6051873198847262,
"grad_norm": 0.61552494764328,
"learning_rate": 3.5e-06,
"loss": 0.0554,
"step": 70
},
{
"epoch": 0.6138328530259366,
"grad_norm": 0.6545206308364868,
"learning_rate": 3.5500000000000003e-06,
"loss": 0.0545,
"step": 71
},
{
"epoch": 0.622478386167147,
"grad_norm": 0.9062793254852295,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0559,
"step": 72
},
{
"epoch": 0.6311239193083573,
"grad_norm": 0.9642562866210938,
"learning_rate": 3.65e-06,
"loss": 0.0485,
"step": 73
},
{
"epoch": 0.6397694524495677,
"grad_norm": 0.7286660075187683,
"learning_rate": 3.7e-06,
"loss": 0.0564,
"step": 74
},
{
"epoch": 0.6484149855907781,
"grad_norm": 0.6356053948402405,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.053,
"step": 75
},
{
"epoch": 0.6570605187319885,
"grad_norm": 0.706794261932373,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.0531,
"step": 76
},
{
"epoch": 0.6657060518731989,
"grad_norm": 0.6616448163986206,
"learning_rate": 3.85e-06,
"loss": 0.0504,
"step": 77
},
{
"epoch": 0.6743515850144092,
"grad_norm": 0.7465748190879822,
"learning_rate": 3.900000000000001e-06,
"loss": 0.0495,
"step": 78
},
{
"epoch": 0.6829971181556196,
"grad_norm": 0.8153467774391174,
"learning_rate": 3.95e-06,
"loss": 0.0495,
"step": 79
},
{
"epoch": 0.69164265129683,
"grad_norm": 0.7728897333145142,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0434,
"step": 80
},
{
"epoch": 0.7002881844380403,
"grad_norm": 0.6173391938209534,
"learning_rate": 4.05e-06,
"loss": 0.0432,
"step": 81
},
{
"epoch": 0.7089337175792507,
"grad_norm": 0.7128047943115234,
"learning_rate": 4.1e-06,
"loss": 0.0512,
"step": 82
},
{
"epoch": 0.7175792507204611,
"grad_norm": 0.6098653674125671,
"learning_rate": 4.15e-06,
"loss": 0.0415,
"step": 83
},
{
"epoch": 0.7262247838616714,
"grad_norm": 0.7464293241500854,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.0453,
"step": 84
},
{
"epoch": 0.7348703170028819,
"grad_norm": 0.8350300788879395,
"learning_rate": 4.25e-06,
"loss": 0.0463,
"step": 85
},
{
"epoch": 0.7435158501440923,
"grad_norm": 0.7880110740661621,
"learning_rate": 4.3e-06,
"loss": 0.0426,
"step": 86
},
{
"epoch": 0.7521613832853026,
"grad_norm": 0.77886962890625,
"learning_rate": 4.350000000000001e-06,
"loss": 0.0476,
"step": 87
},
{
"epoch": 0.760806916426513,
"grad_norm": 1.0404386520385742,
"learning_rate": 4.4e-06,
"loss": 0.0501,
"step": 88
},
{
"epoch": 0.7694524495677233,
"grad_norm": 0.5827208757400513,
"learning_rate": 4.450000000000001e-06,
"loss": 0.0396,
"step": 89
},
{
"epoch": 0.7780979827089337,
"grad_norm": 0.5928618907928467,
"learning_rate": 4.5e-06,
"loss": 0.0438,
"step": 90
},
{
"epoch": 0.7867435158501441,
"grad_norm": 0.5311946272850037,
"learning_rate": 4.5500000000000005e-06,
"loss": 0.0388,
"step": 91
},
{
"epoch": 0.7953890489913544,
"grad_norm": 0.7609073519706726,
"learning_rate": 4.600000000000001e-06,
"loss": 0.0403,
"step": 92
},
{
"epoch": 0.8040345821325648,
"grad_norm": 0.6055853962898254,
"learning_rate": 4.65e-06,
"loss": 0.0402,
"step": 93
},
{
"epoch": 0.8126801152737753,
"grad_norm": 0.8020023703575134,
"learning_rate": 4.7e-06,
"loss": 0.0415,
"step": 94
},
{
"epoch": 0.8213256484149856,
"grad_norm": 0.9083772301673889,
"learning_rate": 4.75e-06,
"loss": 0.0428,
"step": 95
},
{
"epoch": 0.829971181556196,
"grad_norm": 0.6658433079719543,
"learning_rate": 4.800000000000001e-06,
"loss": 0.0381,
"step": 96
},
{
"epoch": 0.8386167146974063,
"grad_norm": 0.925826907157898,
"learning_rate": 4.85e-06,
"loss": 0.0465,
"step": 97
},
{
"epoch": 0.8472622478386167,
"grad_norm": 0.5956787467002869,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.0429,
"step": 98
},
{
"epoch": 0.8559077809798271,
"grad_norm": 0.8485273718833923,
"learning_rate": 4.95e-06,
"loss": 0.0465,
"step": 99
},
{
"epoch": 0.8645533141210374,
"grad_norm": 0.7477124333381653,
"learning_rate": 5e-06,
"loss": 0.0407,
"step": 100
},
{
"epoch": 0.8731988472622478,
"grad_norm": 0.624204695224762,
"learning_rate": 4.999964559102694e-06,
"loss": 0.0433,
"step": 101
},
{
"epoch": 0.8818443804034583,
"grad_norm": 0.6230912804603577,
"learning_rate": 4.999858237415621e-06,
"loss": 0.0364,
"step": 102
},
{
"epoch": 0.8904899135446686,
"grad_norm": 0.9286472201347351,
"learning_rate": 4.999681037953289e-06,
"loss": 0.0385,
"step": 103
},
{
"epoch": 0.899135446685879,
"grad_norm": 1.0528490543365479,
"learning_rate": 4.999432965739786e-06,
"loss": 0.0406,
"step": 104
},
{
"epoch": 0.9077809798270894,
"grad_norm": 1.1465263366699219,
"learning_rate": 4.999114027808632e-06,
"loss": 0.0526,
"step": 105
},
{
"epoch": 0.9164265129682997,
"grad_norm": 0.7173194885253906,
"learning_rate": 4.998724233202585e-06,
"loss": 0.05,
"step": 106
},
{
"epoch": 0.9250720461095101,
"grad_norm": 0.7150623798370361,
"learning_rate": 4.998263592973382e-06,
"loss": 0.0392,
"step": 107
},
{
"epoch": 0.9337175792507204,
"grad_norm": 0.6749414801597595,
"learning_rate": 4.9977321201814235e-06,
"loss": 0.0381,
"step": 108
},
{
"epoch": 0.9423631123919308,
"grad_norm": 0.7057478427886963,
"learning_rate": 4.997129829895409e-06,
"loss": 0.0427,
"step": 109
},
{
"epoch": 0.9510086455331412,
"grad_norm": 0.8370860815048218,
"learning_rate": 4.996456739191905e-06,
"loss": 0.0353,
"step": 110
},
{
"epoch": 0.9596541786743515,
"grad_norm": 0.8583172559738159,
"learning_rate": 4.995712867154863e-06,
"loss": 0.0366,
"step": 111
},
{
"epoch": 0.968299711815562,
"grad_norm": 0.9564568400382996,
"learning_rate": 4.994898234875075e-06,
"loss": 0.0463,
"step": 112
},
{
"epoch": 0.9769452449567724,
"grad_norm": 0.7943828105926514,
"learning_rate": 4.9940128654495826e-06,
"loss": 0.0366,
"step": 113
},
{
"epoch": 0.9855907780979827,
"grad_norm": 0.9387117624282837,
"learning_rate": 4.9930567839810125e-06,
"loss": 0.0433,
"step": 114
},
{
"epoch": 0.9942363112391931,
"grad_norm": 0.5982036590576172,
"learning_rate": 4.992030017576876e-06,
"loss": 0.0311,
"step": 115
},
{
"epoch": 1.0086455331412103,
"grad_norm": 1.6264456510543823,
"learning_rate": 4.990932595348788e-06,
"loss": 0.079,
"step": 116
},
{
"epoch": 1.0172910662824208,
"grad_norm": 0.5183172225952148,
"learning_rate": 4.989764548411654e-06,
"loss": 0.0268,
"step": 117
},
{
"epoch": 1.0259365994236311,
"grad_norm": 0.5135197043418884,
"learning_rate": 4.988525909882779e-06,
"loss": 0.0281,
"step": 118
},
{
"epoch": 1.0345821325648414,
"grad_norm": 0.6001684665679932,
"learning_rate": 4.987216714880929e-06,
"loss": 0.0312,
"step": 119
},
{
"epoch": 1.043227665706052,
"grad_norm": 0.5347486138343811,
"learning_rate": 4.9858370005253435e-06,
"loss": 0.0264,
"step": 120
},
{
"epoch": 1.0518731988472623,
"grad_norm": 0.8435088396072388,
"learning_rate": 4.9843868059346725e-06,
"loss": 0.0296,
"step": 121
},
{
"epoch": 1.0605187319884726,
"grad_norm": 0.8622809052467346,
"learning_rate": 4.982866172225876e-06,
"loss": 0.0277,
"step": 122
},
{
"epoch": 1.069164265129683,
"grad_norm": 0.8440551161766052,
"learning_rate": 4.981275142513049e-06,
"loss": 0.0301,
"step": 123
},
{
"epoch": 1.0778097982708934,
"grad_norm": 0.6853605508804321,
"learning_rate": 4.979613761906212e-06,
"loss": 0.0264,
"step": 124
},
{
"epoch": 1.0864553314121037,
"grad_norm": 0.7620036602020264,
"learning_rate": 4.977882077510018e-06,
"loss": 0.0281,
"step": 125
},
{
"epoch": 1.0951008645533142,
"grad_norm": 0.9126653075218201,
"learning_rate": 4.9760801384224274e-06,
"loss": 0.0312,
"step": 126
},
{
"epoch": 1.1037463976945245,
"grad_norm": 0.6158509850502014,
"learning_rate": 4.97420799573331e-06,
"loss": 0.0263,
"step": 127
},
{
"epoch": 1.1123919308357348,
"grad_norm": 0.7442693114280701,
"learning_rate": 4.972265702523001e-06,
"loss": 0.0251,
"step": 128
},
{
"epoch": 1.1210374639769451,
"grad_norm": 0.5755389928817749,
"learning_rate": 4.970253313860788e-06,
"loss": 0.0276,
"step": 129
},
{
"epoch": 1.1296829971181557,
"grad_norm": 0.6565670967102051,
"learning_rate": 4.968170886803361e-06,
"loss": 0.029,
"step": 130
},
{
"epoch": 1.138328530259366,
"grad_norm": 0.6405811309814453,
"learning_rate": 4.966018480393189e-06,
"loss": 0.0293,
"step": 131
},
{
"epoch": 1.1469740634005763,
"grad_norm": 0.5925298929214478,
"learning_rate": 4.9637961556568405e-06,
"loss": 0.0264,
"step": 132
},
{
"epoch": 1.1556195965417868,
"grad_norm": 0.5921182632446289,
"learning_rate": 4.961503975603263e-06,
"loss": 0.0305,
"step": 133
},
{
"epoch": 1.1642651296829971,
"grad_norm": 0.6314800977706909,
"learning_rate": 4.959142005221991e-06,
"loss": 0.0242,
"step": 134
},
{
"epoch": 1.1729106628242074,
"grad_norm": 0.6051074862480164,
"learning_rate": 4.956710311481303e-06,
"loss": 0.0303,
"step": 135
},
{
"epoch": 1.181556195965418,
"grad_norm": 0.7119901776313782,
"learning_rate": 4.954208963326327e-06,
"loss": 0.023,
"step": 136
},
{
"epoch": 1.1902017291066282,
"grad_norm": 1.087274432182312,
"learning_rate": 4.951638031677081e-06,
"loss": 0.0222,
"step": 137
},
{
"epoch": 1.1988472622478386,
"grad_norm": 0.5598759651184082,
"learning_rate": 4.948997589426463e-06,
"loss": 0.0255,
"step": 138
},
{
"epoch": 1.207492795389049,
"grad_norm": 0.7533854246139526,
"learning_rate": 4.94628771143819e-06,
"loss": 0.0245,
"step": 139
},
{
"epoch": 1.2161383285302594,
"grad_norm": 0.7722812294960022,
"learning_rate": 4.943508474544667e-06,
"loss": 0.0237,
"step": 140
},
{
"epoch": 1.2247838616714697,
"grad_norm": 0.6604969501495361,
"learning_rate": 4.940659957544813e-06,
"loss": 0.02,
"step": 141
},
{
"epoch": 1.23342939481268,
"grad_norm": 0.5789263248443604,
"learning_rate": 4.937742241201826e-06,
"loss": 0.0263,
"step": 142
},
{
"epoch": 1.2420749279538905,
"grad_norm": 1.1343308687210083,
"learning_rate": 4.934755408240896e-06,
"loss": 0.0291,
"step": 143
},
{
"epoch": 1.2507204610951008,
"grad_norm": 0.7230944037437439,
"learning_rate": 4.931699543346854e-06,
"loss": 0.0245,
"step": 144
},
{
"epoch": 1.2593659942363113,
"grad_norm": 0.591327428817749,
"learning_rate": 4.928574733161775e-06,
"loss": 0.0241,
"step": 145
},
{
"epoch": 1.2680115273775217,
"grad_norm": 0.6604726910591125,
"learning_rate": 4.925381066282522e-06,
"loss": 0.0242,
"step": 146
},
{
"epoch": 1.276657060518732,
"grad_norm": 0.5772287845611572,
"learning_rate": 4.922118633258229e-06,
"loss": 0.0272,
"step": 147
},
{
"epoch": 1.2853025936599423,
"grad_norm": 0.7148783802986145,
"learning_rate": 4.918787526587739e-06,
"loss": 0.0196,
"step": 148
},
{
"epoch": 1.2939481268011528,
"grad_norm": 0.660778284072876,
"learning_rate": 4.9153878407169815e-06,
"loss": 0.0202,
"step": 149
},
{
"epoch": 1.302593659942363,
"grad_norm": 0.6156355738639832,
"learning_rate": 4.911919672036291e-06,
"loss": 0.0297,
"step": 150
},
{
"epoch": 1.3112391930835736,
"grad_norm": 0.6828760504722595,
"learning_rate": 4.908383118877672e-06,
"loss": 0.0235,
"step": 151
},
{
"epoch": 1.319884726224784,
"grad_norm": 0.7847033143043518,
"learning_rate": 4.904778281512022e-06,
"loss": 0.0278,
"step": 152
},
{
"epoch": 1.3285302593659942,
"grad_norm": 0.9113219976425171,
"learning_rate": 4.901105262146275e-06,
"loss": 0.0226,
"step": 153
},
{
"epoch": 1.3371757925072045,
"grad_norm": 0.5406802892684937,
"learning_rate": 4.897364164920515e-06,
"loss": 0.0205,
"step": 154
},
{
"epoch": 1.345821325648415,
"grad_norm": 0.7523823976516724,
"learning_rate": 4.8935550959050135e-06,
"loss": 0.0209,
"step": 155
},
{
"epoch": 1.3544668587896254,
"grad_norm": 0.7060834169387817,
"learning_rate": 4.889678163097233e-06,
"loss": 0.0217,
"step": 156
},
{
"epoch": 1.3631123919308357,
"grad_norm": 0.758028507232666,
"learning_rate": 4.885733476418752e-06,
"loss": 0.02,
"step": 157
},
{
"epoch": 1.3717579250720462,
"grad_norm": 0.7745693325996399,
"learning_rate": 4.8817211477121615e-06,
"loss": 0.0197,
"step": 158
},
{
"epoch": 1.3804034582132565,
"grad_norm": 0.665055513381958,
"learning_rate": 4.8776412907378845e-06,
"loss": 0.019,
"step": 159
},
{
"epoch": 1.3890489913544668,
"grad_norm": 0.6661068797111511,
"learning_rate": 4.8734940211709535e-06,
"loss": 0.0196,
"step": 160
},
{
"epoch": 1.397694524495677,
"grad_norm": 0.9675925374031067,
"learning_rate": 4.8692794565977335e-06,
"loss": 0.0171,
"step": 161
},
{
"epoch": 1.4063400576368876,
"grad_norm": 0.6929494738578796,
"learning_rate": 4.864997716512584e-06,
"loss": 0.0198,
"step": 162
},
{
"epoch": 1.414985590778098,
"grad_norm": 0.6929437518119812,
"learning_rate": 4.8606489223144744e-06,
"loss": 0.0152,
"step": 163
},
{
"epoch": 1.4236311239193085,
"grad_norm": 0.7960386276245117,
"learning_rate": 4.8562331973035396e-06,
"loss": 0.0177,
"step": 164
},
{
"epoch": 1.4322766570605188,
"grad_norm": 0.8672429919242859,
"learning_rate": 4.851750666677583e-06,
"loss": 0.0215,
"step": 165
},
{
"epoch": 1.440922190201729,
"grad_norm": 0.7653059959411621,
"learning_rate": 4.847201457528533e-06,
"loss": 0.0172,
"step": 166
},
{
"epoch": 1.4495677233429394,
"grad_norm": 0.8027440905570984,
"learning_rate": 4.842585698838832e-06,
"loss": 0.0145,
"step": 167
},
{
"epoch": 1.45821325648415,
"grad_norm": 0.6481142044067383,
"learning_rate": 4.837903521477784e-06,
"loss": 0.0184,
"step": 168
},
{
"epoch": 1.4668587896253602,
"grad_norm": 0.8014233112335205,
"learning_rate": 4.833155058197842e-06,
"loss": 0.0207,
"step": 169
},
{
"epoch": 1.4755043227665707,
"grad_norm": 0.719078779220581,
"learning_rate": 4.828340443630847e-06,
"loss": 0.0184,
"step": 170
},
{
"epoch": 1.484149855907781,
"grad_norm": 0.6776456832885742,
"learning_rate": 4.823459814284205e-06,
"loss": 0.0186,
"step": 171
},
{
"epoch": 1.4927953890489913,
"grad_norm": 0.8054779171943665,
"learning_rate": 4.818513308537025e-06,
"loss": 0.0179,
"step": 172
},
{
"epoch": 1.5014409221902016,
"grad_norm": 0.8131511211395264,
"learning_rate": 4.813501066636188e-06,
"loss": 0.0177,
"step": 173
},
{
"epoch": 1.510086455331412,
"grad_norm": 0.7842754125595093,
"learning_rate": 4.808423230692374e-06,
"loss": 0.0182,
"step": 174
},
{
"epoch": 1.5187319884726225,
"grad_norm": 0.6084232330322266,
"learning_rate": 4.8032799446760326e-06,
"loss": 0.014,
"step": 175
},
{
"epoch": 1.527377521613833,
"grad_norm": 0.7110248804092407,
"learning_rate": 4.798071354413302e-06,
"loss": 0.0164,
"step": 176
},
{
"epoch": 1.5360230547550433,
"grad_norm": 0.707081139087677,
"learning_rate": 4.792797607581872e-06,
"loss": 0.0137,
"step": 177
},
{
"epoch": 1.5446685878962536,
"grad_norm": 0.6509385704994202,
"learning_rate": 4.787458853706798e-06,
"loss": 0.0145,
"step": 178
},
{
"epoch": 1.553314121037464,
"grad_norm": 0.7796662449836731,
"learning_rate": 4.7820552441562625e-06,
"loss": 0.0169,
"step": 179
},
{
"epoch": 1.5619596541786742,
"grad_norm": 0.7351200580596924,
"learning_rate": 4.7765869321372835e-06,
"loss": 0.0152,
"step": 180
},
{
"epoch": 1.5706051873198847,
"grad_norm": 0.9273463487625122,
"learning_rate": 4.771054072691367e-06,
"loss": 0.0181,
"step": 181
},
{
"epoch": 1.579250720461095,
"grad_norm": 0.7243021130561829,
"learning_rate": 4.7654568226901165e-06,
"loss": 0.0148,
"step": 182
},
{
"epoch": 1.5878962536023056,
"grad_norm": 0.729568600654602,
"learning_rate": 4.759795340830782e-06,
"loss": 0.0145,
"step": 183
},
{
"epoch": 1.5965417867435159,
"grad_norm": 0.5814985632896423,
"learning_rate": 4.754069787631761e-06,
"loss": 0.0128,
"step": 184
},
{
"epoch": 1.6051873198847262,
"grad_norm": 0.49074554443359375,
"learning_rate": 4.7482803254280485e-06,
"loss": 0.0125,
"step": 185
},
{
"epoch": 1.6138328530259365,
"grad_norm": 0.5640458464622498,
"learning_rate": 4.742427118366632e-06,
"loss": 0.0155,
"step": 186
},
{
"epoch": 1.622478386167147,
"grad_norm": 0.574414849281311,
"learning_rate": 4.736510332401841e-06,
"loss": 0.0164,
"step": 187
},
{
"epoch": 1.6311239193083573,
"grad_norm": 0.5837032198905945,
"learning_rate": 4.730530135290638e-06,
"loss": 0.0129,
"step": 188
},
{
"epoch": 1.6397694524495678,
"grad_norm": 0.5181459188461304,
"learning_rate": 4.724486696587862e-06,
"loss": 0.016,
"step": 189
},
{
"epoch": 1.6484149855907781,
"grad_norm": 0.990842342376709,
"learning_rate": 4.718380187641429e-06,
"loss": 0.0173,
"step": 190
},
{
"epoch": 1.6570605187319885,
"grad_norm": 0.6795041561126709,
"learning_rate": 4.712210781587463e-06,
"loss": 0.013,
"step": 191
},
{
"epoch": 1.6657060518731988,
"grad_norm": 0.7275810837745667,
"learning_rate": 4.705978653345392e-06,
"loss": 0.0142,
"step": 192
},
{
"epoch": 1.674351585014409,
"grad_norm": 0.575529932975769,
"learning_rate": 4.699683979612991e-06,
"loss": 0.0128,
"step": 193
},
{
"epoch": 1.6829971181556196,
"grad_norm": 0.8860746622085571,
"learning_rate": 4.693326938861367e-06,
"loss": 0.0115,
"step": 194
},
{
"epoch": 1.6916426512968301,
"grad_norm": 0.985464870929718,
"learning_rate": 4.686907711329903e-06,
"loss": 0.0146,
"step": 195
},
{
"epoch": 1.7002881844380404,
"grad_norm": 0.5690702199935913,
"learning_rate": 4.680426479021147e-06,
"loss": 0.0133,
"step": 196
},
{
"epoch": 1.7089337175792507,
"grad_norm": 0.536115825176239,
"learning_rate": 4.67388342569565e-06,
"loss": 0.0082,
"step": 197
},
{
"epoch": 1.717579250720461,
"grad_norm": 0.9312114119529724,
"learning_rate": 4.667278736866755e-06,
"loss": 0.0127,
"step": 198
},
{
"epoch": 1.7262247838616713,
"grad_norm": 0.8902615904808044,
"learning_rate": 4.660612599795343e-06,
"loss": 0.0121,
"step": 199
},
{
"epoch": 1.7348703170028819,
"grad_norm": 0.8274016380310059,
"learning_rate": 4.653885203484516e-06,
"loss": 0.0142,
"step": 200
},
{
"epoch": 1.7435158501440924,
"grad_norm": 1.00035560131073,
"learning_rate": 4.647096738674243e-06,
"loss": 0.0104,
"step": 201
},
{
"epoch": 1.7521613832853027,
"grad_norm": 0.5850755572319031,
"learning_rate": 4.640247397835953e-06,
"loss": 0.0083,
"step": 202
},
{
"epoch": 1.760806916426513,
"grad_norm": 0.588136613368988,
"learning_rate": 4.633337375167074e-06,
"loss": 0.0108,
"step": 203
},
{
"epoch": 1.7694524495677233,
"grad_norm": 0.6305558681488037,
"learning_rate": 4.626366866585528e-06,
"loss": 0.0103,
"step": 204
},
{
"epoch": 1.7780979827089336,
"grad_norm": 0.46658825874328613,
"learning_rate": 4.619336069724177e-06,
"loss": 0.0103,
"step": 205
},
{
"epoch": 1.7867435158501441,
"grad_norm": 2.3057351112365723,
"learning_rate": 4.612245183925225e-06,
"loss": 0.0123,
"step": 206
},
{
"epoch": 1.7953890489913544,
"grad_norm": 0.6322522163391113,
"learning_rate": 4.605094410234551e-06,
"loss": 0.0123,
"step": 207
},
{
"epoch": 1.804034582132565,
"grad_norm": 1.1089496612548828,
"learning_rate": 4.597883951396027e-06,
"loss": 0.0149,
"step": 208
},
{
"epoch": 1.8126801152737753,
"grad_norm": 0.7867231965065002,
"learning_rate": 4.590614011845758e-06,
"loss": 0.01,
"step": 209
},
{
"epoch": 1.8213256484149856,
"grad_norm": 0.8597843647003174,
"learning_rate": 4.583284797706288e-06,
"loss": 0.0121,
"step": 210
},
{
"epoch": 1.8299711815561959,
"grad_norm": 0.6839237809181213,
"learning_rate": 4.575896516780757e-06,
"loss": 0.0108,
"step": 211
},
{
"epoch": 1.8386167146974062,
"grad_norm": 0.9036144614219666,
"learning_rate": 4.568449378547011e-06,
"loss": 0.009,
"step": 212
},
{
"epoch": 1.8472622478386167,
"grad_norm": 0.439732164144516,
"learning_rate": 4.560943594151657e-06,
"loss": 0.0097,
"step": 213
},
{
"epoch": 1.8559077809798272,
"grad_norm": 0.5021887421607971,
"learning_rate": 4.553379376404085e-06,
"loss": 0.0087,
"step": 214
},
{
"epoch": 1.8645533141210375,
"grad_norm": 0.36055275797843933,
"learning_rate": 4.5457569397704226e-06,
"loss": 0.0079,
"step": 215
},
{
"epoch": 1.8731988472622478,
"grad_norm": 0.5071420669555664,
"learning_rate": 4.538076500367469e-06,
"loss": 0.0065,
"step": 216
},
{
"epoch": 1.8818443804034581,
"grad_norm": 0.5510254502296448,
"learning_rate": 4.530338275956553e-06,
"loss": 0.0099,
"step": 217
},
{
"epoch": 1.8904899135446684,
"grad_norm": 0.8489411473274231,
"learning_rate": 4.522542485937369e-06,
"loss": 0.0068,
"step": 218
},
{
"epoch": 1.899135446685879,
"grad_norm": 0.48011505603790283,
"learning_rate": 4.514689351341751e-06,
"loss": 0.007,
"step": 219
},
{
"epoch": 1.9077809798270895,
"grad_norm": 0.7145248055458069,
"learning_rate": 4.506779094827409e-06,
"loss": 0.0085,
"step": 220
},
{
"epoch": 1.9164265129682998,
"grad_norm": 0.7936932444572449,
"learning_rate": 4.498811940671615e-06,
"loss": 0.0086,
"step": 221
},
{
"epoch": 1.92507204610951,
"grad_norm": 0.7309126257896423,
"learning_rate": 4.49078811476484e-06,
"loss": 0.0093,
"step": 222
},
{
"epoch": 1.9337175792507204,
"grad_norm": 0.6176413893699646,
"learning_rate": 4.482707844604359e-06,
"loss": 0.0095,
"step": 223
},
{
"epoch": 1.9423631123919307,
"grad_norm": 0.995867133140564,
"learning_rate": 4.474571359287791e-06,
"loss": 0.0061,
"step": 224
},
{
"epoch": 1.9510086455331412,
"grad_norm": 0.47976699471473694,
"learning_rate": 4.466378889506607e-06,
"loss": 0.0054,
"step": 225
},
{
"epoch": 1.9596541786743515,
"grad_norm": 0.740679144859314,
"learning_rate": 4.458130667539592e-06,
"loss": 0.0097,
"step": 226
},
{
"epoch": 1.968299711815562,
"grad_norm": 0.565301775932312,
"learning_rate": 4.449826927246257e-06,
"loss": 0.008,
"step": 227
},
{
"epoch": 1.9769452449567724,
"grad_norm": 0.6384701728820801,
"learning_rate": 4.441467904060207e-06,
"loss": 0.0053,
"step": 228
},
{
"epoch": 1.9855907780979827,
"grad_norm": 0.8729382157325745,
"learning_rate": 4.4330538349824684e-06,
"loss": 0.0092,
"step": 229
},
{
"epoch": 1.994236311239193,
"grad_norm": 0.6458576321601868,
"learning_rate": 4.424584958574766e-06,
"loss": 0.0082,
"step": 230
}
],
"logging_steps": 1,
"max_steps": 690,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 115,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.81830452487127e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}