{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.857142857142857,
"eval_steps": 500,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03296703296703297,
"grad_norm": 31.073850631713867,
"learning_rate": 5.0000000000000004e-08,
"loss": 2.5967,
"step": 1
},
{
"epoch": 0.06593406593406594,
"grad_norm": 32.91181182861328,
"learning_rate": 1.0000000000000001e-07,
"loss": 2.723,
"step": 2
},
{
"epoch": 0.0989010989010989,
"grad_norm": 31.494897842407227,
"learning_rate": 1.5000000000000002e-07,
"loss": 2.6125,
"step": 3
},
{
"epoch": 0.13186813186813187,
"grad_norm": 30.80953598022461,
"learning_rate": 2.0000000000000002e-07,
"loss": 2.58,
"step": 4
},
{
"epoch": 0.16483516483516483,
"grad_norm": 31.269071578979492,
"learning_rate": 2.5000000000000004e-07,
"loss": 2.5977,
"step": 5
},
{
"epoch": 0.1978021978021978,
"grad_norm": 30.687875747680664,
"learning_rate": 3.0000000000000004e-07,
"loss": 2.5588,
"step": 6
},
{
"epoch": 0.23076923076923078,
"grad_norm": 31.30279541015625,
"learning_rate": 3.5000000000000004e-07,
"loss": 2.5731,
"step": 7
},
{
"epoch": 0.26373626373626374,
"grad_norm": 31.384830474853516,
"learning_rate": 4.0000000000000003e-07,
"loss": 2.561,
"step": 8
},
{
"epoch": 0.2967032967032967,
"grad_norm": 30.58422088623047,
"learning_rate": 4.5000000000000003e-07,
"loss": 2.4872,
"step": 9
},
{
"epoch": 0.32967032967032966,
"grad_norm": 30.883068084716797,
"learning_rate": 5.000000000000001e-07,
"loss": 2.5257,
"step": 10
},
{
"epoch": 0.3626373626373626,
"grad_norm": 32.198814392089844,
"learning_rate": 5.5e-07,
"loss": 2.6286,
"step": 11
},
{
"epoch": 0.3956043956043956,
"grad_norm": 31.001300811767578,
"learning_rate": 6.000000000000001e-07,
"loss": 2.4632,
"step": 12
},
{
"epoch": 0.42857142857142855,
"grad_norm": 31.106016159057617,
"learning_rate": 6.5e-07,
"loss": 2.4274,
"step": 13
},
{
"epoch": 0.46153846153846156,
"grad_norm": 31.180011749267578,
"learning_rate": 7.000000000000001e-07,
"loss": 2.3864,
"step": 14
},
{
"epoch": 0.4945054945054945,
"grad_norm": 30.95736312866211,
"learning_rate": 7.5e-07,
"loss": 2.2977,
"step": 15
},
{
"epoch": 0.5274725274725275,
"grad_norm": 31.67963218688965,
"learning_rate": 8.000000000000001e-07,
"loss": 2.2632,
"step": 16
},
{
"epoch": 0.5604395604395604,
"grad_norm": 32.420562744140625,
"learning_rate": 8.500000000000001e-07,
"loss": 2.1983,
"step": 17
},
{
"epoch": 0.5934065934065934,
"grad_norm": 32.46091079711914,
"learning_rate": 9.000000000000001e-07,
"loss": 2.0623,
"step": 18
},
{
"epoch": 0.6263736263736264,
"grad_norm": 31.34447479248047,
"learning_rate": 9.500000000000001e-07,
"loss": 1.8633,
"step": 19
},
{
"epoch": 0.6593406593406593,
"grad_norm": 31.65386962890625,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.7861,
"step": 20
},
{
"epoch": 0.6923076923076923,
"grad_norm": 30.12110137939453,
"learning_rate": 1.0500000000000001e-06,
"loss": 1.6408,
"step": 21
},
{
"epoch": 0.7252747252747253,
"grad_norm": 28.824857711791992,
"learning_rate": 1.1e-06,
"loss": 1.5022,
"step": 22
},
{
"epoch": 0.7582417582417582,
"grad_norm": 27.37271499633789,
"learning_rate": 1.1500000000000002e-06,
"loss": 1.3799,
"step": 23
},
{
"epoch": 0.7912087912087912,
"grad_norm": 26.869949340820312,
"learning_rate": 1.2000000000000002e-06,
"loss": 1.2699,
"step": 24
},
{
"epoch": 0.8241758241758241,
"grad_norm": 26.696306228637695,
"learning_rate": 1.25e-06,
"loss": 1.107,
"step": 25
},
{
"epoch": 0.8571428571428571,
"grad_norm": 29.182300567626953,
"learning_rate": 1.3e-06,
"loss": 0.9981,
"step": 26
},
{
"epoch": 0.8901098901098901,
"grad_norm": 28.117998123168945,
"learning_rate": 1.3500000000000002e-06,
"loss": 0.7886,
"step": 27
},
{
"epoch": 0.9230769230769231,
"grad_norm": 27.153093338012695,
"learning_rate": 1.4000000000000001e-06,
"loss": 0.6565,
"step": 28
},
{
"epoch": 0.9560439560439561,
"grad_norm": 24.350711822509766,
"learning_rate": 1.45e-06,
"loss": 0.5143,
"step": 29
},
{
"epoch": 0.989010989010989,
"grad_norm": 21.189594268798828,
"learning_rate": 1.5e-06,
"loss": 0.3953,
"step": 30
},
{
"epoch": 1.0,
"grad_norm": 21.189594268798828,
"learning_rate": 1.5500000000000002e-06,
"loss": 0.2703,
"step": 31
},
{
"epoch": 1.032967032967033,
"grad_norm": 25.850399017333984,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.2846,
"step": 32
},
{
"epoch": 1.065934065934066,
"grad_norm": 7.641750335693359,
"learning_rate": 1.6500000000000003e-06,
"loss": 0.1964,
"step": 33
},
{
"epoch": 1.098901098901099,
"grad_norm": 5.204587459564209,
"learning_rate": 1.7000000000000002e-06,
"loss": 0.195,
"step": 34
},
{
"epoch": 1.1318681318681318,
"grad_norm": 5.610931396484375,
"learning_rate": 1.75e-06,
"loss": 0.2097,
"step": 35
},
{
"epoch": 1.164835164835165,
"grad_norm": 3.679949998855591,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.1428,
"step": 36
},
{
"epoch": 1.1978021978021978,
"grad_norm": 2.877136707305908,
"learning_rate": 1.85e-06,
"loss": 0.1506,
"step": 37
},
{
"epoch": 1.2307692307692308,
"grad_norm": 1.788109302520752,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.1384,
"step": 38
},
{
"epoch": 1.2637362637362637,
"grad_norm": 1.154598355293274,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.1255,
"step": 39
},
{
"epoch": 1.2967032967032968,
"grad_norm": 1.1099777221679688,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.1291,
"step": 40
},
{
"epoch": 1.3296703296703296,
"grad_norm": 1.022336483001709,
"learning_rate": 2.05e-06,
"loss": 0.1149,
"step": 41
},
{
"epoch": 1.3626373626373627,
"grad_norm": 0.8197290897369385,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.1175,
"step": 42
},
{
"epoch": 1.3956043956043955,
"grad_norm": 1.283385992050171,
"learning_rate": 2.15e-06,
"loss": 0.1137,
"step": 43
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.9875780344009399,
"learning_rate": 2.2e-06,
"loss": 0.117,
"step": 44
},
{
"epoch": 1.4615384615384617,
"grad_norm": 0.5812683701515198,
"learning_rate": 2.25e-06,
"loss": 0.1089,
"step": 45
},
{
"epoch": 1.4945054945054945,
"grad_norm": 0.8154236674308777,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.1102,
"step": 46
},
{
"epoch": 1.5274725274725274,
"grad_norm": 0.6170194149017334,
"learning_rate": 2.35e-06,
"loss": 0.1108,
"step": 47
},
{
"epoch": 1.5604395604395604,
"grad_norm": 0.8121249675750732,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.1077,
"step": 48
},
{
"epoch": 1.5934065934065935,
"grad_norm": 0.7454224824905396,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.1089,
"step": 49
},
{
"epoch": 1.6263736263736264,
"grad_norm": 1.021628499031067,
"learning_rate": 2.5e-06,
"loss": 0.1075,
"step": 50
},
{
"epoch": 1.6593406593406592,
"grad_norm": 0.8242588639259338,
"learning_rate": 2.55e-06,
"loss": 0.1056,
"step": 51
},
{
"epoch": 1.6923076923076923,
"grad_norm": 0.7174047827720642,
"learning_rate": 2.6e-06,
"loss": 0.1062,
"step": 52
},
{
"epoch": 1.7252747252747254,
"grad_norm": 0.676979660987854,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.1062,
"step": 53
},
{
"epoch": 1.7582417582417582,
"grad_norm": 0.44768354296684265,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.1073,
"step": 54
},
{
"epoch": 1.791208791208791,
"grad_norm": 1.0064852237701416,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.1033,
"step": 55
},
{
"epoch": 1.8241758241758241,
"grad_norm": 1.3364107608795166,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.1041,
"step": 56
},
{
"epoch": 1.8571428571428572,
"grad_norm": 1.187800645828247,
"learning_rate": 2.85e-06,
"loss": 0.1054,
"step": 57
},
{
"epoch": 1.89010989010989,
"grad_norm": 1.457709550857544,
"learning_rate": 2.9e-06,
"loss": 0.1163,
"step": 58
},
{
"epoch": 1.9230769230769231,
"grad_norm": 1.1929093599319458,
"learning_rate": 2.95e-06,
"loss": 0.1049,
"step": 59
},
{
"epoch": 1.9560439560439562,
"grad_norm": 0.6893891096115112,
"learning_rate": 3e-06,
"loss": 0.1023,
"step": 60
},
{
"epoch": 1.989010989010989,
"grad_norm": 0.7837490439414978,
"learning_rate": 3.05e-06,
"loss": 0.1014,
"step": 61
},
{
"epoch": 2.0,
"grad_norm": 0.7837490439414978,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.1324,
"step": 62
},
{
"epoch": 2.032967032967033,
"grad_norm": 2.488649845123291,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.1017,
"step": 63
},
{
"epoch": 2.065934065934066,
"grad_norm": 1.104415774345398,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.1019,
"step": 64
},
{
"epoch": 2.098901098901099,
"grad_norm": 0.9327191710472107,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.0996,
"step": 65
},
{
"epoch": 2.131868131868132,
"grad_norm": 1.17020583152771,
"learning_rate": 3.3000000000000006e-06,
"loss": 0.0979,
"step": 66
},
{
"epoch": 2.1648351648351647,
"grad_norm": 0.6745622754096985,
"learning_rate": 3.3500000000000005e-06,
"loss": 0.0924,
"step": 67
},
{
"epoch": 2.197802197802198,
"grad_norm": 0.8487798571586609,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.0993,
"step": 68
},
{
"epoch": 2.230769230769231,
"grad_norm": 1.7320159673690796,
"learning_rate": 3.45e-06,
"loss": 0.0929,
"step": 69
},
{
"epoch": 2.2637362637362637,
"grad_norm": 1.8361762762069702,
"learning_rate": 3.5e-06,
"loss": 0.0931,
"step": 70
},
{
"epoch": 2.2967032967032965,
"grad_norm": 0.8804886341094971,
"learning_rate": 3.5500000000000003e-06,
"loss": 0.0905,
"step": 71
},
{
"epoch": 2.32967032967033,
"grad_norm": 1.29473876953125,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0959,
"step": 72
},
{
"epoch": 2.3626373626373627,
"grad_norm": 1.2560906410217285,
"learning_rate": 3.65e-06,
"loss": 0.0954,
"step": 73
},
{
"epoch": 2.3956043956043955,
"grad_norm": 1.2681041955947876,
"learning_rate": 3.7e-06,
"loss": 0.0935,
"step": 74
},
{
"epoch": 2.4285714285714284,
"grad_norm": 1.6790293455123901,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0958,
"step": 75
},
{
"epoch": 2.4615384615384617,
"grad_norm": 1.250998854637146,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.0916,
"step": 76
},
{
"epoch": 2.4945054945054945,
"grad_norm": 1.1952019929885864,
"learning_rate": 3.85e-06,
"loss": 0.094,
"step": 77
},
{
"epoch": 2.5274725274725274,
"grad_norm": 1.381598711013794,
"learning_rate": 3.900000000000001e-06,
"loss": 0.0892,
"step": 78
},
{
"epoch": 2.5604395604395602,
"grad_norm": 0.9761469960212708,
"learning_rate": 3.95e-06,
"loss": 0.091,
"step": 79
},
{
"epoch": 2.5934065934065935,
"grad_norm": 1.5026003122329712,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0917,
"step": 80
},
{
"epoch": 2.6263736263736264,
"grad_norm": 3.112177848815918,
"learning_rate": 4.05e-06,
"loss": 0.0891,
"step": 81
},
{
"epoch": 2.659340659340659,
"grad_norm": 1.4188305139541626,
"learning_rate": 4.1e-06,
"loss": 0.0856,
"step": 82
},
{
"epoch": 2.6923076923076925,
"grad_norm": 1.1930654048919678,
"learning_rate": 4.15e-06,
"loss": 0.0796,
"step": 83
},
{
"epoch": 2.7252747252747254,
"grad_norm": 1.2071819305419922,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.0849,
"step": 84
},
{
"epoch": 2.758241758241758,
"grad_norm": 1.8237779140472412,
"learning_rate": 4.25e-06,
"loss": 0.0809,
"step": 85
},
{
"epoch": 2.791208791208791,
"grad_norm": 1.8066545724868774,
"learning_rate": 4.3e-06,
"loss": 0.0822,
"step": 86
},
{
"epoch": 2.824175824175824,
"grad_norm": 1.541412591934204,
"learning_rate": 4.350000000000001e-06,
"loss": 0.0737,
"step": 87
},
{
"epoch": 2.857142857142857,
"grad_norm": 1.6204791069030762,
"learning_rate": 4.4e-06,
"loss": 0.0701,
"step": 88
},
{
"epoch": 2.89010989010989,
"grad_norm": 1.6418696641921997,
"learning_rate": 4.450000000000001e-06,
"loss": 0.0693,
"step": 89
},
{
"epoch": 2.9230769230769234,
"grad_norm": 1.4522324800491333,
"learning_rate": 4.5e-06,
"loss": 0.0701,
"step": 90
},
{
"epoch": 2.956043956043956,
"grad_norm": 2.1432557106018066,
"learning_rate": 4.5500000000000005e-06,
"loss": 0.064,
"step": 91
},
{
"epoch": 2.989010989010989,
"grad_norm": 2.0663564205169678,
"learning_rate": 4.600000000000001e-06,
"loss": 0.0641,
"step": 92
},
{
"epoch": 3.0,
"grad_norm": 4.103982925415039,
"learning_rate": 4.65e-06,
"loss": 0.0537,
"step": 93
},
{
"epoch": 3.032967032967033,
"grad_norm": 1.6428738832473755,
"learning_rate": 4.7e-06,
"loss": 0.0455,
"step": 94
},
{
"epoch": 3.065934065934066,
"grad_norm": 1.7648729085922241,
"learning_rate": 4.75e-06,
"loss": 0.0556,
"step": 95
},
{
"epoch": 3.098901098901099,
"grad_norm": 1.8822323083877563,
"learning_rate": 4.800000000000001e-06,
"loss": 0.0517,
"step": 96
},
{
"epoch": 3.131868131868132,
"grad_norm": 2.105926752090454,
"learning_rate": 4.85e-06,
"loss": 0.0483,
"step": 97
},
{
"epoch": 3.1648351648351647,
"grad_norm": 1.5844470262527466,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.0448,
"step": 98
},
{
"epoch": 3.197802197802198,
"grad_norm": 2.6496734619140625,
"learning_rate": 4.95e-06,
"loss": 0.0419,
"step": 99
},
{
"epoch": 3.230769230769231,
"grad_norm": 1.711832880973816,
"learning_rate": 5e-06,
"loss": 0.0457,
"step": 100
},
{
"epoch": 3.2637362637362637,
"grad_norm": 2.0797977447509766,
"learning_rate": 4.998072590601808e-06,
"loss": 0.0392,
"step": 101
},
{
"epoch": 3.2967032967032965,
"grad_norm": 2.556063175201416,
"learning_rate": 4.992293334332821e-06,
"loss": 0.0393,
"step": 102
},
{
"epoch": 3.32967032967033,
"grad_norm": 2.772800922393799,
"learning_rate": 4.982671142387316e-06,
"loss": 0.0391,
"step": 103
},
{
"epoch": 3.3626373626373627,
"grad_norm": 3.494256019592285,
"learning_rate": 4.9692208514878445e-06,
"loss": 0.0385,
"step": 104
},
{
"epoch": 3.3956043956043955,
"grad_norm": 3.0490529537200928,
"learning_rate": 4.9519632010080765e-06,
"loss": 0.0471,
"step": 105
},
{
"epoch": 3.4285714285714284,
"grad_norm": 2.358599901199341,
"learning_rate": 4.930924800994192e-06,
"loss": 0.0369,
"step": 106
},
{
"epoch": 3.4615384615384617,
"grad_norm": 1.8047081232070923,
"learning_rate": 4.906138091134118e-06,
"loss": 0.0344,
"step": 107
},
{
"epoch": 3.4945054945054945,
"grad_norm": 2.1081414222717285,
"learning_rate": 4.8776412907378845e-06,
"loss": 0.0322,
"step": 108
},
{
"epoch": 3.5274725274725274,
"grad_norm": 2.368657112121582,
"learning_rate": 4.845478339806211e-06,
"loss": 0.0282,
"step": 109
},
{
"epoch": 3.5604395604395602,
"grad_norm": 1.6193580627441406,
"learning_rate": 4.809698831278217e-06,
"loss": 0.0294,
"step": 110
},
{
"epoch": 3.5934065934065935,
"grad_norm": 1.9592503309249878,
"learning_rate": 4.770357934562704e-06,
"loss": 0.0312,
"step": 111
},
{
"epoch": 3.6263736263736264,
"grad_norm": 1.9919159412384033,
"learning_rate": 4.72751631047092e-06,
"loss": 0.0281,
"step": 112
},
{
"epoch": 3.659340659340659,
"grad_norm": 1.4293889999389648,
"learning_rate": 4.681240017681994e-06,
"loss": 0.0236,
"step": 113
},
{
"epoch": 3.6923076923076925,
"grad_norm": 1.7509453296661377,
"learning_rate": 4.631600410885231e-06,
"loss": 0.0237,
"step": 114
},
{
"epoch": 3.7252747252747254,
"grad_norm": 2.26200008392334,
"learning_rate": 4.578674030756364e-06,
"loss": 0.023,
"step": 115
},
{
"epoch": 3.758241758241758,
"grad_norm": 3.075289011001587,
"learning_rate": 4.522542485937369e-06,
"loss": 0.025,
"step": 116
},
{
"epoch": 3.791208791208791,
"grad_norm": 2.381298065185547,
"learning_rate": 4.463292327201862e-06,
"loss": 0.0277,
"step": 117
},
{
"epoch": 3.824175824175824,
"grad_norm": 2.659548044204712,
"learning_rate": 4.401014914000078e-06,
"loss": 0.0274,
"step": 118
},
{
"epoch": 3.857142857142857,
"grad_norm": 2.317688465118408,
"learning_rate": 4.335806273589214e-06,
"loss": 0.0234,
"step": 119
},
{
"epoch": 3.89010989010989,
"grad_norm": 1.3909262418746948,
"learning_rate": 4.267766952966369e-06,
"loss": 0.0158,
"step": 120
},
{
"epoch": 3.9230769230769234,
"grad_norm": 6.296055793762207,
"learning_rate": 4.197001863832355e-06,
"loss": 0.0263,
"step": 121
},
{
"epoch": 3.956043956043956,
"grad_norm": 3.982027053833008,
"learning_rate": 4.123620120825459e-06,
"loss": 0.0184,
"step": 122
},
{
"epoch": 3.989010989010989,
"grad_norm": 1.9030568599700928,
"learning_rate": 4.047734873274586e-06,
"loss": 0.0201,
"step": 123
},
{
"epoch": 4.0,
"grad_norm": 1.9030568599700928,
"learning_rate": 3.969463130731183e-06,
"loss": 0.0212,
"step": 124
},
{
"epoch": 4.032967032967033,
"grad_norm": 4.881707191467285,
"learning_rate": 3.888925582549006e-06,
"loss": 0.0147,
"step": 125
},
{
"epoch": 4.065934065934066,
"grad_norm": 2.6425750255584717,
"learning_rate": 3.806246411789872e-06,
"loss": 0.0164,
"step": 126
},
{
"epoch": 4.0989010989010985,
"grad_norm": 1.239315152168274,
"learning_rate": 3.721553103742388e-06,
"loss": 0.0106,
"step": 127
},
{
"epoch": 4.131868131868132,
"grad_norm": 2.193694829940796,
"learning_rate": 3.634976249348867e-06,
"loss": 0.0118,
"step": 128
},
{
"epoch": 4.164835164835165,
"grad_norm": 1.5028455257415771,
"learning_rate": 3.5466493438435707e-06,
"loss": 0.0152,
"step": 129
},
{
"epoch": 4.197802197802198,
"grad_norm": 2.392939805984497,
"learning_rate": 3.4567085809127247e-06,
"loss": 0.0108,
"step": 130
},
{
"epoch": 4.230769230769231,
"grad_norm": 1.1450010538101196,
"learning_rate": 3.3652926426937327e-06,
"loss": 0.0148,
"step": 131
},
{
"epoch": 4.263736263736264,
"grad_norm": 1.7259258031845093,
"learning_rate": 3.272542485937369e-06,
"loss": 0.0106,
"step": 132
},
{
"epoch": 4.2967032967032965,
"grad_norm": 1.8394275903701782,
"learning_rate": 3.1786011246626858e-06,
"loss": 0.0113,
"step": 133
},
{
"epoch": 4.329670329670329,
"grad_norm": 1.0393275022506714,
"learning_rate": 3.0836134096397642e-06,
"loss": 0.0109,
"step": 134
},
{
"epoch": 4.362637362637362,
"grad_norm": 1.6944122314453125,
"learning_rate": 2.9877258050403214e-06,
"loss": 0.0098,
"step": 135
},
{
"epoch": 4.395604395604396,
"grad_norm": 1.4724135398864746,
"learning_rate": 2.8910861626005774e-06,
"loss": 0.0104,
"step": 136
},
{
"epoch": 4.428571428571429,
"grad_norm": 1.4693012237548828,
"learning_rate": 2.7938434936445946e-06,
"loss": 0.0078,
"step": 137
},
{
"epoch": 4.461538461538462,
"grad_norm": 1.7742702960968018,
"learning_rate": 2.696147739319613e-06,
"loss": 0.0143,
"step": 138
},
{
"epoch": 4.4945054945054945,
"grad_norm": 1.035766839981079,
"learning_rate": 2.5981495393976718e-06,
"loss": 0.0082,
"step": 139
},
{
"epoch": 4.527472527472527,
"grad_norm": 1.7405714988708496,
"learning_rate": 2.5e-06,
"loss": 0.0095,
"step": 140
},
{
"epoch": 4.56043956043956,
"grad_norm": 1.2387239933013916,
"learning_rate": 2.4018504606023295e-06,
"loss": 0.0097,
"step": 141
},
{
"epoch": 4.593406593406593,
"grad_norm": 1.6785768270492554,
"learning_rate": 2.3038522606803882e-06,
"loss": 0.0068,
"step": 142
},
{
"epoch": 4.626373626373626,
"grad_norm": 1.0219742059707642,
"learning_rate": 2.2061565063554063e-06,
"loss": 0.0056,
"step": 143
},
{
"epoch": 4.65934065934066,
"grad_norm": 1.2220392227172852,
"learning_rate": 2.1089138373994226e-06,
"loss": 0.0064,
"step": 144
},
{
"epoch": 4.6923076923076925,
"grad_norm": 1.1894707679748535,
"learning_rate": 2.01227419495968e-06,
"loss": 0.0095,
"step": 145
},
{
"epoch": 4.725274725274725,
"grad_norm": 2.7715442180633545,
"learning_rate": 1.9163865903602374e-06,
"loss": 0.0096,
"step": 146
},
{
"epoch": 4.758241758241758,
"grad_norm": 1.5755218267440796,
"learning_rate": 1.8213988753373147e-06,
"loss": 0.0105,
"step": 147
},
{
"epoch": 4.791208791208791,
"grad_norm": 1.045127034187317,
"learning_rate": 1.7274575140626318e-06,
"loss": 0.0058,
"step": 148
},
{
"epoch": 4.824175824175824,
"grad_norm": 1.336412787437439,
"learning_rate": 1.634707357306267e-06,
"loss": 0.0082,
"step": 149
},
{
"epoch": 4.857142857142857,
"grad_norm": 1.9876456260681152,
"learning_rate": 1.5432914190872757e-06,
"loss": 0.0038,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 180,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 30,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.704853821915136e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}