{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9984,
"eval_steps": 500,
"global_step": 156,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0064,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 4.4717,
"step": 1
},
{
"epoch": 0.0128,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 3.7313,
"step": 2
},
{
"epoch": 0.0192,
"grad_norm": 50.09918975830078,
"learning_rate": 0.0,
"loss": 4.8819,
"step": 3
},
{
"epoch": 0.0256,
"grad_norm": 49.641700744628906,
"learning_rate": 3.125e-06,
"loss": 5.1502,
"step": 4
},
{
"epoch": 0.032,
"grad_norm": 38.01877975463867,
"learning_rate": 6.25e-06,
"loss": 4.0776,
"step": 5
},
{
"epoch": 0.0384,
"grad_norm": 34.66939926147461,
"learning_rate": 9.375000000000001e-06,
"loss": 3.8857,
"step": 6
},
{
"epoch": 0.0448,
"grad_norm": 33.477439880371094,
"learning_rate": 1.25e-05,
"loss": 3.8721,
"step": 7
},
{
"epoch": 0.0512,
"grad_norm": 45.55080795288086,
"learning_rate": 1.5625e-05,
"loss": 3.7879,
"step": 8
},
{
"epoch": 0.0576,
"grad_norm": 32.00156021118164,
"learning_rate": 1.8750000000000002e-05,
"loss": 3.345,
"step": 9
},
{
"epoch": 0.064,
"grad_norm": 29.618633270263672,
"learning_rate": 2.1875e-05,
"loss": 3.1297,
"step": 10
},
{
"epoch": 0.0704,
"grad_norm": 32.84077453613281,
"learning_rate": 2.5e-05,
"loss": 3.3396,
"step": 11
},
{
"epoch": 0.0768,
"grad_norm": 31.820215225219727,
"learning_rate": 2.8125000000000003e-05,
"loss": 3.0906,
"step": 12
},
{
"epoch": 0.0832,
"grad_norm": 27.288164138793945,
"learning_rate": 3.125e-05,
"loss": 2.8775,
"step": 13
},
{
"epoch": 0.0896,
"grad_norm": 30.749038696289062,
"learning_rate": 3.4375e-05,
"loss": 3.0161,
"step": 14
},
{
"epoch": 0.096,
"grad_norm": 25.01470184326172,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.8359,
"step": 15
},
{
"epoch": 0.1024,
"grad_norm": 24.316774368286133,
"learning_rate": 4.0625000000000005e-05,
"loss": 2.7709,
"step": 16
},
{
"epoch": 0.1088,
"grad_norm": 21.760366439819336,
"learning_rate": 4.375e-05,
"loss": 2.6184,
"step": 17
},
{
"epoch": 0.1152,
"grad_norm": 18.646337509155273,
"learning_rate": 4.6875e-05,
"loss": 2.1646,
"step": 18
},
{
"epoch": 0.1216,
"grad_norm": 23.844223022460938,
"learning_rate": 5e-05,
"loss": 2.8295,
"step": 19
},
{
"epoch": 0.128,
"grad_norm": 24.90797233581543,
"learning_rate": 4.9993705873562665e-05,
"loss": 2.9796,
"step": 20
},
{
"epoch": 0.1344,
"grad_norm": 23.792762756347656,
"learning_rate": 4.997482666353287e-05,
"loss": 2.9825,
"step": 21
},
{
"epoch": 0.1408,
"grad_norm": 21.59649658203125,
"learning_rate": 4.99433718761614e-05,
"loss": 2.496,
"step": 22
},
{
"epoch": 0.1472,
"grad_norm": null,
"learning_rate": 4.989935734988098e-05,
"loss": 2.2942,
"step": 23
},
{
"epoch": 0.1536,
"grad_norm": 26.09481430053711,
"learning_rate": 4.989935734988098e-05,
"loss": 2.4601,
"step": 24
},
{
"epoch": 0.16,
"grad_norm": null,
"learning_rate": 4.984280524733107e-05,
"loss": 2.7508,
"step": 25
},
{
"epoch": 0.1664,
"grad_norm": null,
"learning_rate": 4.984280524733107e-05,
"loss": 3.4579,
"step": 26
},
{
"epoch": 0.1728,
"grad_norm": 196.4443359375,
"learning_rate": 4.984280524733107e-05,
"loss": 2.5371,
"step": 27
},
{
"epoch": 0.1792,
"grad_norm": 24.16455078125,
"learning_rate": 4.977374404419837e-05,
"loss": 2.4961,
"step": 28
},
{
"epoch": 0.1856,
"grad_norm": 20.553077697753906,
"learning_rate": 4.9692208514878444e-05,
"loss": 2.0176,
"step": 29
},
{
"epoch": 0.192,
"grad_norm": 20.222591400146484,
"learning_rate": 4.959823971496574e-05,
"loss": 2.3133,
"step": 30
},
{
"epoch": 0.1984,
"grad_norm": 23.096298217773438,
"learning_rate": 4.9491884960580894e-05,
"loss": 2.6822,
"step": 31
},
{
"epoch": 0.2048,
"grad_norm": 20.923219680786133,
"learning_rate": 4.937319780454559e-05,
"loss": 2.718,
"step": 32
},
{
"epoch": 0.2112,
"grad_norm": 20.68602752685547,
"learning_rate": 4.9242238009417175e-05,
"loss": 2.7881,
"step": 33
},
{
"epoch": 0.2176,
"grad_norm": 20.52227210998535,
"learning_rate": 4.909907151739633e-05,
"loss": 2.4548,
"step": 34
},
{
"epoch": 0.224,
"grad_norm": 20.739513397216797,
"learning_rate": 4.894377041712326e-05,
"loss": 2.7408,
"step": 35
},
{
"epoch": 0.2304,
"grad_norm": 16.86931610107422,
"learning_rate": 4.877641290737884e-05,
"loss": 2.2282,
"step": 36
},
{
"epoch": 0.2368,
"grad_norm": 20.215930938720703,
"learning_rate": 4.8597083257709194e-05,
"loss": 2.3036,
"step": 37
},
{
"epoch": 0.2432,
"grad_norm": 21.190462112426758,
"learning_rate": 4.8405871765993433e-05,
"loss": 2.0958,
"step": 38
},
{
"epoch": 0.2496,
"grad_norm": 18.445409774780273,
"learning_rate": 4.820287471297598e-05,
"loss": 2.1603,
"step": 39
},
{
"epoch": 0.256,
"grad_norm": 17.071805953979492,
"learning_rate": 4.7988194313786275e-05,
"loss": 2.6078,
"step": 40
},
{
"epoch": 0.2624,
"grad_norm": 18.929542541503906,
"learning_rate": 4.7761938666470403e-05,
"loss": 2.0112,
"step": 41
},
{
"epoch": 0.2688,
"grad_norm": 17.4970645904541,
"learning_rate": 4.752422169756048e-05,
"loss": 2.6611,
"step": 42
},
{
"epoch": 0.2752,
"grad_norm": 19.641874313354492,
"learning_rate": 4.72751631047092e-05,
"loss": 2.369,
"step": 43
},
{
"epoch": 0.2816,
"grad_norm": 18.337270736694336,
"learning_rate": 4.701488829641845e-05,
"loss": 2.744,
"step": 44
},
{
"epoch": 0.288,
"grad_norm": 17.220279693603516,
"learning_rate": 4.674352832889239e-05,
"loss": 2.3373,
"step": 45
},
{
"epoch": 0.2944,
"grad_norm": 13.7833251953125,
"learning_rate": 4.6461219840046654e-05,
"loss": 2.1818,
"step": 46
},
{
"epoch": 0.3008,
"grad_norm": 19.681421279907227,
"learning_rate": 4.6168104980707107e-05,
"loss": 2.846,
"step": 47
},
{
"epoch": 0.3072,
"grad_norm": 17.642488479614258,
"learning_rate": 4.586433134303257e-05,
"loss": 2.6217,
"step": 48
},
{
"epoch": 0.3136,
"grad_norm": 15.780743598937988,
"learning_rate": 4.5550051886197754e-05,
"loss": 2.0978,
"step": 49
},
{
"epoch": 0.32,
"grad_norm": 18.60186004638672,
"learning_rate": 4.522542485937369e-05,
"loss": 2.8495,
"step": 50
},
{
"epoch": 0.3264,
"grad_norm": 16.284923553466797,
"learning_rate": 4.489061372204453e-05,
"loss": 1.9968,
"step": 51
},
{
"epoch": 0.3328,
"grad_norm": 18.098651885986328,
"learning_rate": 4.454578706170075e-05,
"loss": 2.8142,
"step": 52
},
{
"epoch": 0.3392,
"grad_norm": 23.010522842407227,
"learning_rate": 4.419111850895028e-05,
"loss": 2.848,
"step": 53
},
{
"epoch": 0.3456,
"grad_norm": 21.82461166381836,
"learning_rate": 4.382678665009028e-05,
"loss": 2.3738,
"step": 54
},
{
"epoch": 0.352,
"grad_norm": 20.080095291137695,
"learning_rate": 4.345297493718352e-05,
"loss": 2.55,
"step": 55
},
{
"epoch": 0.3584,
"grad_norm": 16.38557243347168,
"learning_rate": 4.306987159568479e-05,
"loss": 2.2054,
"step": 56
},
{
"epoch": 0.3648,
"grad_norm": 16.24085235595703,
"learning_rate": 4.267766952966369e-05,
"loss": 2.2092,
"step": 57
},
{
"epoch": 0.3712,
"grad_norm": 17.953500747680664,
"learning_rate": 4.227656622467162e-05,
"loss": 2.6697,
"step": 58
},
{
"epoch": 0.3776,
"grad_norm": 16.460187911987305,
"learning_rate": 4.186676364830186e-05,
"loss": 2.2483,
"step": 59
},
{
"epoch": 0.384,
"grad_norm": 15.681517601013184,
"learning_rate": 4.144846814849282e-05,
"loss": 2.4484,
"step": 60
},
{
"epoch": 0.3904,
"grad_norm": 18.890939712524414,
"learning_rate": 4.10218903496256e-05,
"loss": 2.3774,
"step": 61
},
{
"epoch": 0.3968,
"grad_norm": 15.988091468811035,
"learning_rate": 4.058724504646834e-05,
"loss": 2.1949,
"step": 62
},
{
"epoch": 0.4032,
"grad_norm": 12.159764289855957,
"learning_rate": 4.01447510960205e-05,
"loss": 2.0308,
"step": 63
},
{
"epoch": 0.4096,
"grad_norm": 16.4202938079834,
"learning_rate": 3.969463130731183e-05,
"loss": 2.1106,
"step": 64
},
{
"epoch": 0.416,
"grad_norm": 16.02802276611328,
"learning_rate": 3.92371123292113e-05,
"loss": 2.2022,
"step": 65
},
{
"epoch": 0.4224,
"grad_norm": 15.44360065460205,
"learning_rate": 3.8772424536302564e-05,
"loss": 2.0292,
"step": 66
},
{
"epoch": 0.4288,
"grad_norm": 16.987598419189453,
"learning_rate": 3.830080191288342e-05,
"loss": 2.3518,
"step": 67
},
{
"epoch": 0.4352,
"grad_norm": 16.269733428955078,
"learning_rate": 3.782248193514766e-05,
"loss": 2.7523,
"step": 68
},
{
"epoch": 0.4416,
"grad_norm": 15.051939964294434,
"learning_rate": 3.7337705451608674e-05,
"loss": 2.2994,
"step": 69
},
{
"epoch": 0.448,
"grad_norm": 16.779054641723633,
"learning_rate": 3.6846716561824965e-05,
"loss": 2.299,
"step": 70
},
{
"epoch": 0.4544,
"grad_norm": 20.661670684814453,
"learning_rate": 3.634976249348867e-05,
"loss": 2.5077,
"step": 71
},
{
"epoch": 0.4608,
"grad_norm": 17.521690368652344,
"learning_rate": 3.5847093477938956e-05,
"loss": 2.3761,
"step": 72
},
{
"epoch": 0.4672,
"grad_norm": 13.472983360290527,
"learning_rate": 3.533896262416302e-05,
"loss": 1.6423,
"step": 73
},
{
"epoch": 0.4736,
"grad_norm": 15.794869422912598,
"learning_rate": 3.4825625791348096e-05,
"loss": 2.3065,
"step": 74
},
{
"epoch": 0.48,
"grad_norm": 14.395452499389648,
"learning_rate": 3.4307341460048633e-05,
"loss": 1.9287,
"step": 75
},
{
"epoch": 0.4864,
"grad_norm": 14.031108856201172,
"learning_rate": 3.378437060203357e-05,
"loss": 1.7176,
"step": 76
},
{
"epoch": 0.4928,
"grad_norm": 14.45324420928955,
"learning_rate": 3.3256976548879184e-05,
"loss": 1.7939,
"step": 77
},
{
"epoch": 0.4992,
"grad_norm": 16.889461517333984,
"learning_rate": 3.272542485937369e-05,
"loss": 2.3255,
"step": 78
},
{
"epoch": 0.5056,
"grad_norm": 17.210290908813477,
"learning_rate": 3.218998318580043e-05,
"loss": 2.413,
"step": 79
},
{
"epoch": 0.512,
"grad_norm": 16.81619644165039,
"learning_rate": 3.165092113916688e-05,
"loss": 2.0536,
"step": 80
},
{
"epoch": 0.5184,
"grad_norm": 17.692567825317383,
"learning_rate": 3.110851015344735e-05,
"loss": 2.4001,
"step": 81
},
{
"epoch": 0.5248,
"grad_norm": 17.277616500854492,
"learning_rate": 3.056302334890786e-05,
"loss": 2.4519,
"step": 82
},
{
"epoch": 0.5312,
"grad_norm": 13.526611328125,
"learning_rate": 3.0014735394581823e-05,
"loss": 2.5048,
"step": 83
},
{
"epoch": 0.5376,
"grad_norm": 12.37924575805664,
"learning_rate": 2.9463922369965917e-05,
"loss": 2.1537,
"step": 84
},
{
"epoch": 0.544,
"grad_norm": 12.96046257019043,
"learning_rate": 2.8910861626005776e-05,
"loss": 2.039,
"step": 85
},
{
"epoch": 0.5504,
"grad_norm": 14.766209602355957,
"learning_rate": 2.8355831645441388e-05,
"loss": 1.8841,
"step": 86
},
{
"epoch": 0.5568,
"grad_norm": 15.26258659362793,
"learning_rate": 2.7799111902582696e-05,
"loss": 1.9104,
"step": 87
},
{
"epoch": 0.5632,
"grad_norm": 17.1923770904541,
"learning_rate": 2.724098272258584e-05,
"loss": 2.2466,
"step": 88
},
{
"epoch": 0.5696,
"grad_norm": 14.253467559814453,
"learning_rate": 2.6681725140300997e-05,
"loss": 1.9563,
"step": 89
},
{
"epoch": 0.576,
"grad_norm": 15.101798057556152,
"learning_rate": 2.6121620758762877e-05,
"loss": 2.0253,
"step": 90
},
{
"epoch": 0.5824,
"grad_norm": 13.99666976928711,
"learning_rate": 2.556095160739513e-05,
"loss": 2.1236,
"step": 91
},
{
"epoch": 0.5888,
"grad_norm": 12.746740341186523,
"learning_rate": 2.5e-05,
"loss": 1.6476,
"step": 92
},
{
"epoch": 0.5952,
"grad_norm": 19.76336097717285,
"learning_rate": 2.443904839260488e-05,
"loss": 2.0452,
"step": 93
},
{
"epoch": 0.6016,
"grad_norm": 25.369564056396484,
"learning_rate": 2.3878379241237136e-05,
"loss": 2.1431,
"step": 94
},
{
"epoch": 0.608,
"grad_norm": 17.740018844604492,
"learning_rate": 2.331827485969901e-05,
"loss": 2.0,
"step": 95
},
{
"epoch": 0.6144,
"grad_norm": 15.30899715423584,
"learning_rate": 2.2759017277414166e-05,
"loss": 2.1131,
"step": 96
},
{
"epoch": 0.6208,
"grad_norm": 14.119115829467773,
"learning_rate": 2.2200888097417307e-05,
"loss": 1.7113,
"step": 97
},
{
"epoch": 0.6272,
"grad_norm": 12.78761100769043,
"learning_rate": 2.164416835455862e-05,
"loss": 1.7024,
"step": 98
},
{
"epoch": 0.6336,
"grad_norm": 14.637947082519531,
"learning_rate": 2.1089138373994223e-05,
"loss": 1.999,
"step": 99
},
{
"epoch": 0.64,
"grad_norm": 12.557490348815918,
"learning_rate": 2.0536077630034086e-05,
"loss": 1.9154,
"step": 100
},
{
"epoch": 0.6464,
"grad_norm": 13.05916690826416,
"learning_rate": 1.9985264605418183e-05,
"loss": 2.1664,
"step": 101
},
{
"epoch": 0.6528,
"grad_norm": 13.55716609954834,
"learning_rate": 1.9436976651092144e-05,
"loss": 2.1371,
"step": 102
},
{
"epoch": 0.6592,
"grad_norm": 13.797104835510254,
"learning_rate": 1.8891489846552646e-05,
"loss": 2.0145,
"step": 103
},
{
"epoch": 0.6656,
"grad_norm": 17.893796920776367,
"learning_rate": 1.8349078860833123e-05,
"loss": 2.5387,
"step": 104
},
{
"epoch": 0.672,
"grad_norm": 13.919113159179688,
"learning_rate": 1.781001681419957e-05,
"loss": 1.8043,
"step": 105
},
{
"epoch": 0.6784,
"grad_norm": 18.958742141723633,
"learning_rate": 1.7274575140626318e-05,
"loss": 1.8813,
"step": 106
},
{
"epoch": 0.6848,
"grad_norm": 14.358981132507324,
"learning_rate": 1.6743023451120832e-05,
"loss": 2.2653,
"step": 107
},
{
"epoch": 0.6912,
"grad_norm": 16.244213104248047,
"learning_rate": 1.621562939796643e-05,
"loss": 1.8594,
"step": 108
},
{
"epoch": 0.6976,
"grad_norm": 10.932610511779785,
"learning_rate": 1.5692658539951372e-05,
"loss": 1.5337,
"step": 109
},
{
"epoch": 0.704,
"grad_norm": 13.12227725982666,
"learning_rate": 1.5174374208651912e-05,
"loss": 1.7683,
"step": 110
},
{
"epoch": 0.7104,
"grad_norm": 14.502724647521973,
"learning_rate": 1.466103737583699e-05,
"loss": 1.9204,
"step": 111
},
{
"epoch": 0.7168,
"grad_norm": 13.592811584472656,
"learning_rate": 1.4152906522061048e-05,
"loss": 1.9949,
"step": 112
},
{
"epoch": 0.7232,
"grad_norm": 12.350465774536133,
"learning_rate": 1.3650237506511331e-05,
"loss": 1.7721,
"step": 113
},
{
"epoch": 0.7296,
"grad_norm": 16.48741912841797,
"learning_rate": 1.3153283438175034e-05,
"loss": 2.2935,
"step": 114
},
{
"epoch": 0.736,
"grad_norm": 15.990097045898438,
"learning_rate": 1.2662294548391328e-05,
"loss": 2.0732,
"step": 115
},
{
"epoch": 0.7424,
"grad_norm": 13.479302406311035,
"learning_rate": 1.217751806485235e-05,
"loss": 1.7489,
"step": 116
},
{
"epoch": 0.7488,
"grad_norm": 14.067448616027832,
"learning_rate": 1.1699198087116589e-05,
"loss": 1.7152,
"step": 117
},
{
"epoch": 0.7552,
"grad_norm": 14.49460220336914,
"learning_rate": 1.122757546369744e-05,
"loss": 1.7153,
"step": 118
},
{
"epoch": 0.7616,
"grad_norm": 13.666254997253418,
"learning_rate": 1.0762887670788702e-05,
"loss": 2.0473,
"step": 119
},
{
"epoch": 0.768,
"grad_norm": 14.832523345947266,
"learning_rate": 1.0305368692688174e-05,
"loss": 1.8324,
"step": 120
},
{
"epoch": 0.7744,
"grad_norm": 12.877457618713379,
"learning_rate": 9.855248903979506e-06,
"loss": 1.8216,
"step": 121
},
{
"epoch": 0.7808,
"grad_norm": 29.62076187133789,
"learning_rate": 9.412754953531663e-06,
"loss": 2.3434,
"step": 122
},
{
"epoch": 0.7872,
"grad_norm": 16.266117095947266,
"learning_rate": 8.978109650374397e-06,
"loss": 2.2554,
"step": 123
},
{
"epoch": 0.7936,
"grad_norm": 14.676825523376465,
"learning_rate": 8.551531851507186e-06,
"loss": 1.9236,
"step": 124
},
{
"epoch": 0.8,
"grad_norm": 15.351411819458008,
"learning_rate": 8.133236351698143e-06,
"loss": 2.2928,
"step": 125
},
{
"epoch": 0.8064,
"grad_norm": 15.592199325561523,
"learning_rate": 7.723433775328384e-06,
"loss": 1.8252,
"step": 126
},
{
"epoch": 0.8128,
"grad_norm": 13.63974666595459,
"learning_rate": 7.3223304703363135e-06,
"loss": 1.863,
"step": 127
},
{
"epoch": 0.8192,
"grad_norm": 14.163743019104004,
"learning_rate": 6.930128404315214e-06,
"loss": 2.1451,
"step": 128
},
{
"epoch": 0.8256,
"grad_norm": 12.641812324523926,
"learning_rate": 6.547025062816486e-06,
"loss": 1.5801,
"step": 129
},
{
"epoch": 0.832,
"grad_norm": 12.196229934692383,
"learning_rate": 6.173213349909729e-06,
"loss": 1.6599,
"step": 130
},
{
"epoch": 0.8384,
"grad_norm": 14.172977447509766,
"learning_rate": 5.808881491049723e-06,
"loss": 1.4121,
"step": 131
},
{
"epoch": 0.8448,
"grad_norm": 15.83859920501709,
"learning_rate": 5.454212938299255e-06,
"loss": 2.1285,
"step": 132
},
{
"epoch": 0.8512,
"grad_norm": 15.493228912353516,
"learning_rate": 5.1093862779554776e-06,
"loss": 1.8166,
"step": 133
},
{
"epoch": 0.8576,
"grad_norm": 12.775120735168457,
"learning_rate": 4.7745751406263165e-06,
"loss": 1.797,
"step": 134
},
{
"epoch": 0.864,
"grad_norm": 16.07529640197754,
"learning_rate": 4.4499481138022544e-06,
"loss": 2.4226,
"step": 135
},
{
"epoch": 0.8704,
"grad_norm": 17.667911529541016,
"learning_rate": 4.135668656967434e-06,
"loss": 2.4773,
"step": 136
},
{
"epoch": 0.8768,
"grad_norm": 11.440982818603516,
"learning_rate": 3.831895019292897e-06,
"loss": 1.9243,
"step": 137
},
{
"epoch": 0.8832,
"grad_norm": 11.593207359313965,
"learning_rate": 3.5387801599533475e-06,
"loss": 1.8069,
"step": 138
},
{
"epoch": 0.8896,
"grad_norm": 12.516495704650879,
"learning_rate": 3.2564716711076167e-06,
"loss": 2.0035,
"step": 139
},
{
"epoch": 0.896,
"grad_norm": 13.478569030761719,
"learning_rate": 2.98511170358155e-06,
"loss": 1.8373,
"step": 140
},
{
"epoch": 0.9024,
"grad_norm": 13.340173721313477,
"learning_rate": 2.7248368952908053e-06,
"loss": 1.8686,
"step": 141
},
{
"epoch": 0.9088,
"grad_norm": 14.038780212402344,
"learning_rate": 2.475778302439524e-06,
"loss": 1.788,
"step": 142
},
{
"epoch": 0.9152,
"grad_norm": 12.520078659057617,
"learning_rate": 2.2380613335296036e-06,
"loss": 1.5771,
"step": 143
},
{
"epoch": 0.9216,
"grad_norm": 14.458097457885742,
"learning_rate": 2.0118056862137357e-06,
"loss": 1.7918,
"step": 144
},
{
"epoch": 0.928,
"grad_norm": 11.08523941040039,
"learning_rate": 1.7971252870240291e-06,
"loss": 1.2751,
"step": 145
},
{
"epoch": 0.9344,
"grad_norm": 16.677467346191406,
"learning_rate": 1.59412823400657e-06,
"loss": 1.7166,
"step": 146
},
{
"epoch": 0.9408,
"grad_norm": 11.89140796661377,
"learning_rate": 1.4029167422908107e-06,
"loss": 1.2959,
"step": 147
},
{
"epoch": 0.9472,
"grad_norm": 12.096765518188477,
"learning_rate": 1.2235870926211619e-06,
"loss": 2.0022,
"step": 148
},
{
"epoch": 0.9536,
"grad_norm": 16.47886085510254,
"learning_rate": 1.0562295828767387e-06,
"loss": 1.8419,
"step": 149
},
{
"epoch": 0.96,
"grad_norm": 14.262722969055176,
"learning_rate": 9.009284826036691e-07,
"loss": 2.0822,
"step": 150
},
{
"epoch": 0.9664,
"grad_norm": 12.945296287536621,
"learning_rate": 7.577619905828282e-07,
"loss": 1.6891,
"step": 151
},
{
"epoch": 0.9728,
"grad_norm": 13.680462837219238,
"learning_rate": 6.268021954544096e-07,
"loss": 1.8831,
"step": 152
},
{
"epoch": 0.9792,
"grad_norm": 12.656660079956055,
"learning_rate": 5.08115039419113e-07,
"loss": 1.5704,
"step": 153
},
{
"epoch": 0.9856,
"grad_norm": 13.578558921813965,
"learning_rate": 4.0176028503425835e-07,
"loss": 1.7809,
"step": 154
},
{
"epoch": 0.992,
"grad_norm": 12.945449829101562,
"learning_rate": 3.077914851215585e-07,
"loss": 1.9945,
"step": 155
},
{
"epoch": 0.9984,
"grad_norm": 13.290226936340332,
"learning_rate": 2.262559558016325e-07,
"loss": 2.065,
"step": 156
}
],
"logging_steps": 1,
"max_steps": 156,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3164804878614528.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}