{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9820627802690582,
"eval_steps": 500,
"global_step": 148,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013452914798206279,
"grad_norm": 95.4926986694336,
"learning_rate": 5.0000000000000004e-08,
"loss": 12.2856,
"step": 1
},
{
"epoch": 0.026905829596412557,
"grad_norm": 93.69285583496094,
"learning_rate": 1.0000000000000001e-07,
"loss": 12.2383,
"step": 2
},
{
"epoch": 0.04035874439461883,
"grad_norm": 95.09840393066406,
"learning_rate": 1.5000000000000002e-07,
"loss": 12.1293,
"step": 3
},
{
"epoch": 0.053811659192825115,
"grad_norm": 95.04216766357422,
"learning_rate": 2.0000000000000002e-07,
"loss": 12.1453,
"step": 4
},
{
"epoch": 0.06726457399103139,
"grad_norm": 93.44210052490234,
"learning_rate": 2.5000000000000004e-07,
"loss": 12.165,
"step": 5
},
{
"epoch": 0.08071748878923767,
"grad_norm": 93.28514862060547,
"learning_rate": 3.0000000000000004e-07,
"loss": 12.063,
"step": 6
},
{
"epoch": 0.09417040358744394,
"grad_norm": 95.3654556274414,
"learning_rate": 3.5000000000000004e-07,
"loss": 11.9676,
"step": 7
},
{
"epoch": 0.10762331838565023,
"grad_norm": 96.05154418945312,
"learning_rate": 4.0000000000000003e-07,
"loss": 12.0911,
"step": 8
},
{
"epoch": 0.1210762331838565,
"grad_norm": 97.69881439208984,
"learning_rate": 4.5000000000000003e-07,
"loss": 12.0717,
"step": 9
},
{
"epoch": 0.13452914798206278,
"grad_norm": 95.55254364013672,
"learning_rate": 5.000000000000001e-07,
"loss": 11.9752,
"step": 10
},
{
"epoch": 0.14798206278026907,
"grad_norm": 95.9182357788086,
"learning_rate": 5.5e-07,
"loss": 11.9413,
"step": 11
},
{
"epoch": 0.16143497757847533,
"grad_norm": 95.40771484375,
"learning_rate": 6.000000000000001e-07,
"loss": 11.7523,
"step": 12
},
{
"epoch": 0.17488789237668162,
"grad_norm": 94.40055847167969,
"learning_rate": 6.5e-07,
"loss": 11.6384,
"step": 13
},
{
"epoch": 0.18834080717488788,
"grad_norm": 93.58352661132812,
"learning_rate": 7.000000000000001e-07,
"loss": 11.4293,
"step": 14
},
{
"epoch": 0.20179372197309417,
"grad_norm": 94.48737335205078,
"learning_rate": 7.5e-07,
"loss": 11.1445,
"step": 15
},
{
"epoch": 0.21524663677130046,
"grad_norm": 92.44265747070312,
"learning_rate": 8.000000000000001e-07,
"loss": 10.7705,
"step": 16
},
{
"epoch": 0.22869955156950672,
"grad_norm": 90.97422790527344,
"learning_rate": 8.500000000000001e-07,
"loss": 10.3754,
"step": 17
},
{
"epoch": 0.242152466367713,
"grad_norm": 88.54856872558594,
"learning_rate": 9.000000000000001e-07,
"loss": 10.0019,
"step": 18
},
{
"epoch": 0.2556053811659193,
"grad_norm": 88.39138793945312,
"learning_rate": 9.500000000000001e-07,
"loss": 9.5093,
"step": 19
},
{
"epoch": 0.26905829596412556,
"grad_norm": 86.55109405517578,
"learning_rate": 1.0000000000000002e-06,
"loss": 9.2342,
"step": 20
},
{
"epoch": 0.2825112107623318,
"grad_norm": 80.62335205078125,
"learning_rate": 1.0500000000000001e-06,
"loss": 8.597,
"step": 21
},
{
"epoch": 0.29596412556053814,
"grad_norm": 73.67768859863281,
"learning_rate": 1.1e-06,
"loss": 8.1122,
"step": 22
},
{
"epoch": 0.3094170403587444,
"grad_norm": 64.57353210449219,
"learning_rate": 1.1500000000000002e-06,
"loss": 7.6455,
"step": 23
},
{
"epoch": 0.32286995515695066,
"grad_norm": 55.2818603515625,
"learning_rate": 1.2000000000000002e-06,
"loss": 7.2493,
"step": 24
},
{
"epoch": 0.336322869955157,
"grad_norm": 48.274452209472656,
"learning_rate": 1.25e-06,
"loss": 7.0377,
"step": 25
},
{
"epoch": 0.34977578475336324,
"grad_norm": 42.7370491027832,
"learning_rate": 1.3e-06,
"loss": 6.5782,
"step": 26
},
{
"epoch": 0.3632286995515695,
"grad_norm": 39.297462463378906,
"learning_rate": 1.3500000000000002e-06,
"loss": 6.2558,
"step": 27
},
{
"epoch": 0.37668161434977576,
"grad_norm": 37.91667938232422,
"learning_rate": 1.4000000000000001e-06,
"loss": 5.9809,
"step": 28
},
{
"epoch": 0.3901345291479821,
"grad_norm": 37.87322998046875,
"learning_rate": 1.45e-06,
"loss": 5.7268,
"step": 29
},
{
"epoch": 0.40358744394618834,
"grad_norm": 36.48906707763672,
"learning_rate": 1.5e-06,
"loss": 5.449,
"step": 30
},
{
"epoch": 0.4170403587443946,
"grad_norm": 36.38510513305664,
"learning_rate": 1.5500000000000002e-06,
"loss": 5.1884,
"step": 31
},
{
"epoch": 0.4304932735426009,
"grad_norm": 35.656829833984375,
"learning_rate": 1.6000000000000001e-06,
"loss": 4.899,
"step": 32
},
{
"epoch": 0.4439461883408072,
"grad_norm": 34.09960174560547,
"learning_rate": 1.6500000000000003e-06,
"loss": 4.5842,
"step": 33
},
{
"epoch": 0.45739910313901344,
"grad_norm": 32.74240493774414,
"learning_rate": 1.7000000000000002e-06,
"loss": 4.3009,
"step": 34
},
{
"epoch": 0.47085201793721976,
"grad_norm": 31.867507934570312,
"learning_rate": 1.75e-06,
"loss": 3.9865,
"step": 35
},
{
"epoch": 0.484304932735426,
"grad_norm": 30.741374969482422,
"learning_rate": 1.8000000000000001e-06,
"loss": 3.6916,
"step": 36
},
{
"epoch": 0.4977578475336323,
"grad_norm": 27.8775577545166,
"learning_rate": 1.85e-06,
"loss": 3.3719,
"step": 37
},
{
"epoch": 0.5112107623318386,
"grad_norm": 25.97083282470703,
"learning_rate": 1.9000000000000002e-06,
"loss": 3.0907,
"step": 38
},
{
"epoch": 0.5246636771300448,
"grad_norm": 23.62006950378418,
"learning_rate": 1.9500000000000004e-06,
"loss": 2.8336,
"step": 39
},
{
"epoch": 0.5381165919282511,
"grad_norm": 23.80520248413086,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.5717,
"step": 40
},
{
"epoch": 0.5515695067264574,
"grad_norm": 25.32924461364746,
"learning_rate": 2.05e-06,
"loss": 2.3658,
"step": 41
},
{
"epoch": 0.5650224215246636,
"grad_norm": 26.20570182800293,
"learning_rate": 2.1000000000000002e-06,
"loss": 2.2443,
"step": 42
},
{
"epoch": 0.57847533632287,
"grad_norm": 24.581693649291992,
"learning_rate": 2.15e-06,
"loss": 1.926,
"step": 43
},
{
"epoch": 0.5919282511210763,
"grad_norm": 24.414310455322266,
"learning_rate": 2.2e-06,
"loss": 1.7034,
"step": 44
},
{
"epoch": 0.6053811659192825,
"grad_norm": 22.691083908081055,
"learning_rate": 2.25e-06,
"loss": 1.4857,
"step": 45
},
{
"epoch": 0.6188340807174888,
"grad_norm": 20.669803619384766,
"learning_rate": 2.3000000000000004e-06,
"loss": 1.2415,
"step": 46
},
{
"epoch": 0.6322869955156951,
"grad_norm": 20.149641036987305,
"learning_rate": 2.35e-06,
"loss": 0.997,
"step": 47
},
{
"epoch": 0.6457399103139013,
"grad_norm": 18.632596969604492,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.7552,
"step": 48
},
{
"epoch": 0.6591928251121076,
"grad_norm": 16.93793296813965,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.5883,
"step": 49
},
{
"epoch": 0.672645739910314,
"grad_norm": 14.432519912719727,
"learning_rate": 2.5e-06,
"loss": 0.4382,
"step": 50
},
{
"epoch": 0.6860986547085202,
"grad_norm": 11.829660415649414,
"learning_rate": 2.55e-06,
"loss": 0.2983,
"step": 51
},
{
"epoch": 0.6995515695067265,
"grad_norm": 8.680500030517578,
"learning_rate": 2.6e-06,
"loss": 0.1988,
"step": 52
},
{
"epoch": 0.7130044843049327,
"grad_norm": 6.53156852722168,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.1589,
"step": 53
},
{
"epoch": 0.726457399103139,
"grad_norm": 2.9756624698638916,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.0686,
"step": 54
},
{
"epoch": 0.7399103139013453,
"grad_norm": 5.545580863952637,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0865,
"step": 55
},
{
"epoch": 0.7533632286995515,
"grad_norm": 4.045405387878418,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0949,
"step": 56
},
{
"epoch": 0.7668161434977578,
"grad_norm": 1.6688120365142822,
"learning_rate": 2.85e-06,
"loss": 0.0396,
"step": 57
},
{
"epoch": 0.7802690582959642,
"grad_norm": 2.4520657062530518,
"learning_rate": 2.9e-06,
"loss": 0.0439,
"step": 58
},
{
"epoch": 0.7937219730941704,
"grad_norm": 2.608729600906372,
"learning_rate": 2.95e-06,
"loss": 0.057,
"step": 59
},
{
"epoch": 0.8071748878923767,
"grad_norm": 2.365234851837158,
"learning_rate": 3e-06,
"loss": 0.0547,
"step": 60
},
{
"epoch": 0.820627802690583,
"grad_norm": 0.787550687789917,
"learning_rate": 3.05e-06,
"loss": 0.0209,
"step": 61
},
{
"epoch": 0.8340807174887892,
"grad_norm": 0.7686442732810974,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.0221,
"step": 62
},
{
"epoch": 0.8475336322869955,
"grad_norm": 1.2510555982589722,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.0165,
"step": 63
},
{
"epoch": 0.8609865470852018,
"grad_norm": 0.8923770189285278,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.0187,
"step": 64
},
{
"epoch": 0.874439461883408,
"grad_norm": 0.8052615523338318,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.0266,
"step": 65
},
{
"epoch": 0.8878923766816144,
"grad_norm": 0.6710303425788879,
"learning_rate": 3.3000000000000006e-06,
"loss": 0.0154,
"step": 66
},
{
"epoch": 0.9013452914798207,
"grad_norm": 0.5213025212287903,
"learning_rate": 3.3500000000000005e-06,
"loss": 0.0085,
"step": 67
},
{
"epoch": 0.9147982062780269,
"grad_norm": 0.5758580565452576,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.0133,
"step": 68
},
{
"epoch": 0.9282511210762332,
"grad_norm": 0.6828752160072327,
"learning_rate": 3.45e-06,
"loss": 0.0186,
"step": 69
},
{
"epoch": 0.9417040358744395,
"grad_norm": 0.6814988255500793,
"learning_rate": 3.5e-06,
"loss": 0.0215,
"step": 70
},
{
"epoch": 0.9551569506726457,
"grad_norm": 0.718296229839325,
"learning_rate": 3.5500000000000003e-06,
"loss": 0.0204,
"step": 71
},
{
"epoch": 0.968609865470852,
"grad_norm": 0.7816944122314453,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0184,
"step": 72
},
{
"epoch": 0.9820627802690582,
"grad_norm": 0.6058817505836487,
"learning_rate": 3.65e-06,
"loss": 0.0179,
"step": 73
},
{
"epoch": 0.9955156950672646,
"grad_norm": 1.0496101379394531,
"learning_rate": 3.7e-06,
"loss": 0.032,
"step": 74
},
{
"epoch": 1.0,
"grad_norm": 1.0496101379394531,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0137,
"step": 75
},
{
"epoch": 1.0134529147982063,
"grad_norm": 1.598720669746399,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.0211,
"step": 76
},
{
"epoch": 1.0269058295964126,
"grad_norm": 0.7792187333106995,
"learning_rate": 3.85e-06,
"loss": 0.0213,
"step": 77
},
{
"epoch": 1.0403587443946187,
"grad_norm": 0.7717252373695374,
"learning_rate": 3.900000000000001e-06,
"loss": 0.0151,
"step": 78
},
{
"epoch": 1.053811659192825,
"grad_norm": 0.39334648847579956,
"learning_rate": 3.95e-06,
"loss": 0.0186,
"step": 79
},
{
"epoch": 1.0672645739910314,
"grad_norm": 0.9775457382202148,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0043,
"step": 80
},
{
"epoch": 1.0807174887892377,
"grad_norm": 1.2425150871276855,
"learning_rate": 4.05e-06,
"loss": 0.0179,
"step": 81
},
{
"epoch": 1.094170403587444,
"grad_norm": 0.3884654939174652,
"learning_rate": 4.1e-06,
"loss": 0.008,
"step": 82
},
{
"epoch": 1.1076233183856503,
"grad_norm": 0.4746466875076294,
"learning_rate": 4.15e-06,
"loss": 0.0144,
"step": 83
},
{
"epoch": 1.1210762331838564,
"grad_norm": 0.6812214255332947,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.0072,
"step": 84
},
{
"epoch": 1.1345291479820627,
"grad_norm": 0.5414469838142395,
"learning_rate": 4.25e-06,
"loss": 0.016,
"step": 85
},
{
"epoch": 1.147982062780269,
"grad_norm": 0.8709024786949158,
"learning_rate": 4.3e-06,
"loss": 0.0154,
"step": 86
},
{
"epoch": 1.1614349775784754,
"grad_norm": 0.3181096613407135,
"learning_rate": 4.350000000000001e-06,
"loss": 0.0039,
"step": 87
},
{
"epoch": 1.1748878923766817,
"grad_norm": 0.2581265866756439,
"learning_rate": 4.4e-06,
"loss": 0.0014,
"step": 88
},
{
"epoch": 1.188340807174888,
"grad_norm": 0.10870776325464249,
"learning_rate": 4.450000000000001e-06,
"loss": 0.0088,
"step": 89
},
{
"epoch": 1.201793721973094,
"grad_norm": 0.45314452052116394,
"learning_rate": 4.5e-06,
"loss": 0.0019,
"step": 90
},
{
"epoch": 1.2152466367713004,
"grad_norm": 0.7328381538391113,
"learning_rate": 4.5500000000000005e-06,
"loss": 0.0082,
"step": 91
},
{
"epoch": 1.2286995515695067,
"grad_norm": 0.6641069650650024,
"learning_rate": 4.600000000000001e-06,
"loss": 0.0075,
"step": 92
},
{
"epoch": 1.242152466367713,
"grad_norm": 0.27984222769737244,
"learning_rate": 4.65e-06,
"loss": 0.0009,
"step": 93
},
{
"epoch": 1.2556053811659194,
"grad_norm": 0.8341127634048462,
"learning_rate": 4.7e-06,
"loss": 0.0204,
"step": 94
},
{
"epoch": 1.2690582959641254,
"grad_norm": 1.0140557289123535,
"learning_rate": 4.75e-06,
"loss": 0.0228,
"step": 95
},
{
"epoch": 1.2825112107623318,
"grad_norm": 0.9439787268638611,
"learning_rate": 4.800000000000001e-06,
"loss": 0.027,
"step": 96
},
{
"epoch": 1.295964125560538,
"grad_norm": 1.3867762088775635,
"learning_rate": 4.85e-06,
"loss": 0.0223,
"step": 97
},
{
"epoch": 1.3094170403587444,
"grad_norm": 0.4747941493988037,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.0048,
"step": 98
},
{
"epoch": 1.3228699551569507,
"grad_norm": 0.5673424601554871,
"learning_rate": 4.95e-06,
"loss": 0.0033,
"step": 99
},
{
"epoch": 1.336322869955157,
"grad_norm": 0.24510182440280914,
"learning_rate": 5e-06,
"loss": 0.0048,
"step": 100
},
{
"epoch": 1.3497757847533634,
"grad_norm": 0.6084151268005371,
"learning_rate": 4.99989574668946e-06,
"loss": 0.032,
"step": 101
},
{
"epoch": 1.3632286995515694,
"grad_norm": 0.606272280216217,
"learning_rate": 4.999582995452842e-06,
"loss": 0.0058,
"step": 102
},
{
"epoch": 1.3766816143497758,
"grad_norm": 0.2270481288433075,
"learning_rate": 4.999061772374426e-06,
"loss": 0.0043,
"step": 103
},
{
"epoch": 1.390134529147982,
"grad_norm": 0.2832431495189667,
"learning_rate": 4.998332120925598e-06,
"loss": 0.0079,
"step": 104
},
{
"epoch": 1.4035874439461884,
"grad_norm": 0.44565466046333313,
"learning_rate": 4.9973941019612235e-06,
"loss": 0.017,
"step": 105
},
{
"epoch": 1.4170403587443947,
"grad_norm": 0.3518397808074951,
"learning_rate": 4.996247793714565e-06,
"loss": 0.0057,
"step": 106
},
{
"epoch": 1.4304932735426008,
"grad_norm": 0.41977858543395996,
"learning_rate": 4.994893291790768e-06,
"loss": 0.0056,
"step": 107
},
{
"epoch": 1.4439461883408071,
"grad_norm": 0.22567600011825562,
"learning_rate": 4.993330709158879e-06,
"loss": 0.0067,
"step": 108
},
{
"epoch": 1.4573991031390134,
"grad_norm": 0.2184637039899826,
"learning_rate": 4.9915601761424304e-06,
"loss": 0.0043,
"step": 109
},
{
"epoch": 1.4708520179372198,
"grad_norm": 0.24834735691547394,
"learning_rate": 4.989581840408562e-06,
"loss": 0.0034,
"step": 110
},
{
"epoch": 1.484304932735426,
"grad_norm": 0.13165591657161713,
"learning_rate": 4.987395866955716e-06,
"loss": 0.0041,
"step": 111
},
{
"epoch": 1.4977578475336322,
"grad_norm": 0.27152982354164124,
"learning_rate": 4.9850024380998655e-06,
"loss": 0.0034,
"step": 112
},
{
"epoch": 1.5112107623318387,
"grad_norm": 0.2793160676956177,
"learning_rate": 4.982401753459317e-06,
"loss": 0.0049,
"step": 113
},
{
"epoch": 1.5246636771300448,
"grad_norm": 0.06674113124608994,
"learning_rate": 4.979594029938058e-06,
"loss": 0.0034,
"step": 114
},
{
"epoch": 1.5381165919282511,
"grad_norm": 0.2601087689399719,
"learning_rate": 4.976579501707665e-06,
"loss": 0.0025,
"step": 115
},
{
"epoch": 1.5515695067264574,
"grad_norm": 0.17075951397418976,
"learning_rate": 4.973358420187776e-06,
"loss": 0.0005,
"step": 116
},
{
"epoch": 1.5650224215246635,
"grad_norm": 0.11211276799440384,
"learning_rate": 4.969931054025122e-06,
"loss": 0.0059,
"step": 117
},
{
"epoch": 1.57847533632287,
"grad_norm": 0.5140838027000427,
"learning_rate": 4.966297689071117e-06,
"loss": 0.018,
"step": 118
},
{
"epoch": 1.5919282511210762,
"grad_norm": 0.5356995463371277,
"learning_rate": 4.962458628358021e-06,
"loss": 0.0007,
"step": 119
},
{
"epoch": 1.6053811659192825,
"grad_norm": 0.3967442214488983,
"learning_rate": 4.958414192073665e-06,
"loss": 0.014,
"step": 120
},
{
"epoch": 1.6188340807174888,
"grad_norm": 0.4567921757698059,
"learning_rate": 4.954164717534748e-06,
"loss": 0.0027,
"step": 121
},
{
"epoch": 1.6322869955156951,
"grad_norm": 0.4405613839626312,
"learning_rate": 4.949710559158699e-06,
"loss": 0.0135,
"step": 122
},
{
"epoch": 1.6457399103139014,
"grad_norm": 0.7164422869682312,
"learning_rate": 4.945052088434123e-06,
"loss": 0.0207,
"step": 123
},
{
"epoch": 1.6591928251121075,
"grad_norm": 0.47173142433166504,
"learning_rate": 4.940189693889819e-06,
"loss": 0.0019,
"step": 124
},
{
"epoch": 1.672645739910314,
"grad_norm": 0.4606887996196747,
"learning_rate": 4.9351237810623655e-06,
"loss": 0.0125,
"step": 125
},
{
"epoch": 1.6860986547085202,
"grad_norm": 0.7209401726722717,
"learning_rate": 4.929854772462312e-06,
"loss": 0.012,
"step": 126
},
{
"epoch": 1.6995515695067265,
"grad_norm": 1.7617985010147095,
"learning_rate": 4.924383107538929e-06,
"loss": 0.0079,
"step": 127
},
{
"epoch": 1.7130044843049328,
"grad_norm": 1.0861084461212158,
"learning_rate": 4.918709242643563e-06,
"loss": 0.0091,
"step": 128
},
{
"epoch": 1.726457399103139,
"grad_norm": 0.11263061314821243,
"learning_rate": 4.9128336509915746e-06,
"loss": 0.0006,
"step": 129
},
{
"epoch": 1.7399103139013454,
"grad_norm": 0.3411642909049988,
"learning_rate": 4.906756822622865e-06,
"loss": 0.0037,
"step": 130
},
{
"epoch": 1.7533632286995515,
"grad_norm": 0.1550491899251938,
"learning_rate": 4.900479264361017e-06,
"loss": 0.0056,
"step": 131
},
{
"epoch": 1.7668161434977578,
"grad_norm": 0.8448930382728577,
"learning_rate": 4.894001499771015e-06,
"loss": 0.0135,
"step": 132
},
{
"epoch": 1.7802690582959642,
"grad_norm": 0.5105615258216858,
"learning_rate": 4.887324069115582e-06,
"loss": 0.009,
"step": 133
},
{
"epoch": 1.7937219730941703,
"grad_norm": 0.5042226910591125,
"learning_rate": 4.880447529310118e-06,
"loss": 0.0022,
"step": 134
},
{
"epoch": 1.8071748878923768,
"grad_norm": 0.15797697007656097,
"learning_rate": 4.873372453876255e-06,
"loss": 0.0073,
"step": 135
},
{
"epoch": 1.8206278026905829,
"grad_norm": 0.47805944085121155,
"learning_rate": 4.866099432894023e-06,
"loss": 0.0084,
"step": 136
},
{
"epoch": 1.8340807174887892,
"grad_norm": 0.3370952904224396,
"learning_rate": 4.858629072952635e-06,
"loss": 0.0037,
"step": 137
},
{
"epoch": 1.8475336322869955,
"grad_norm": 0.30983835458755493,
"learning_rate": 4.850961997099892e-06,
"loss": 0.0024,
"step": 138
},
{
"epoch": 1.8609865470852018,
"grad_norm": 0.2800588011741638,
"learning_rate": 4.843098844790228e-06,
"loss": 0.0032,
"step": 139
},
{
"epoch": 1.8744394618834082,
"grad_norm": 0.2037343531847,
"learning_rate": 4.835040271831371e-06,
"loss": 0.0016,
"step": 140
},
{
"epoch": 1.8878923766816142,
"grad_norm": 0.45981553196907043,
"learning_rate": 4.826786950329646e-06,
"loss": 0.0035,
"step": 141
},
{
"epoch": 1.9013452914798208,
"grad_norm": 0.17092454433441162,
"learning_rate": 4.818339568633926e-06,
"loss": 0.0068,
"step": 142
},
{
"epoch": 1.9147982062780269,
"grad_norm": 0.5339077711105347,
"learning_rate": 4.809698831278217e-06,
"loss": 0.0026,
"step": 143
},
{
"epoch": 1.9282511210762332,
"grad_norm": 0.5208529829978943,
"learning_rate": 4.800865458922899e-06,
"loss": 0.0047,
"step": 144
},
{
"epoch": 1.9417040358744395,
"grad_norm": 0.7488933801651001,
"learning_rate": 4.79184018829462e-06,
"loss": 0.0096,
"step": 145
},
{
"epoch": 1.9551569506726456,
"grad_norm": 0.5727106332778931,
"learning_rate": 4.782623772124854e-06,
"loss": 0.001,
"step": 146
},
{
"epoch": 1.9686098654708521,
"grad_norm": 0.3813195526599884,
"learning_rate": 4.77321697908712e-06,
"loss": 0.0085,
"step": 147
},
{
"epoch": 1.9820627802690582,
"grad_norm": 0.5406109094619751,
"learning_rate": 4.763620593732867e-06,
"loss": 0.0017,
"step": 148
}
],
"logging_steps": 1,
"max_steps": 444,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 74,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.3004773594628096e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}