{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9971469329529244,
"eval_steps": 500,
"global_step": 233,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0042796005706134095,
"grad_norm": 34.48759460449219,
"learning_rate": 5.0000000000000004e-08,
"loss": 4.9483,
"step": 1
},
{
"epoch": 0.008559201141226819,
"grad_norm": 35.18870544433594,
"learning_rate": 1.0000000000000001e-07,
"loss": 5.1789,
"step": 2
},
{
"epoch": 0.012838801711840228,
"grad_norm": 34.3349494934082,
"learning_rate": 1.5000000000000002e-07,
"loss": 4.8881,
"step": 3
},
{
"epoch": 0.017118402282453638,
"grad_norm": 36.14989471435547,
"learning_rate": 2.0000000000000002e-07,
"loss": 5.0836,
"step": 4
},
{
"epoch": 0.021398002853067047,
"grad_norm": 33.93682861328125,
"learning_rate": 2.5000000000000004e-07,
"loss": 4.9619,
"step": 5
},
{
"epoch": 0.025677603423680456,
"grad_norm": 34.47760772705078,
"learning_rate": 3.0000000000000004e-07,
"loss": 4.9723,
"step": 6
},
{
"epoch": 0.029957203994293864,
"grad_norm": 34.52993392944336,
"learning_rate": 3.5000000000000004e-07,
"loss": 4.9278,
"step": 7
},
{
"epoch": 0.034236804564907276,
"grad_norm": 33.86513900756836,
"learning_rate": 4.0000000000000003e-07,
"loss": 4.9141,
"step": 8
},
{
"epoch": 0.03851640513552068,
"grad_norm": 32.446311950683594,
"learning_rate": 4.5000000000000003e-07,
"loss": 4.7811,
"step": 9
},
{
"epoch": 0.042796005706134094,
"grad_norm": 33.34199905395508,
"learning_rate": 5.000000000000001e-07,
"loss": 4.8566,
"step": 10
},
{
"epoch": 0.047075606276747506,
"grad_norm": 34.60908508300781,
"learning_rate": 5.5e-07,
"loss": 4.9406,
"step": 11
},
{
"epoch": 0.05135520684736091,
"grad_norm": 34.570987701416016,
"learning_rate": 6.000000000000001e-07,
"loss": 4.8281,
"step": 12
},
{
"epoch": 0.05563480741797432,
"grad_norm": 32.070953369140625,
"learning_rate": 6.5e-07,
"loss": 4.6165,
"step": 13
},
{
"epoch": 0.05991440798858773,
"grad_norm": 33.586509704589844,
"learning_rate": 7.000000000000001e-07,
"loss": 4.8012,
"step": 14
},
{
"epoch": 0.06419400855920114,
"grad_norm": 30.96600341796875,
"learning_rate": 7.5e-07,
"loss": 4.5845,
"step": 15
},
{
"epoch": 0.06847360912981455,
"grad_norm": 30.152454376220703,
"learning_rate": 8.000000000000001e-07,
"loss": 4.4356,
"step": 16
},
{
"epoch": 0.07275320970042796,
"grad_norm": 28.61970329284668,
"learning_rate": 8.500000000000001e-07,
"loss": 4.359,
"step": 17
},
{
"epoch": 0.07703281027104136,
"grad_norm": 27.507383346557617,
"learning_rate": 9.000000000000001e-07,
"loss": 4.4086,
"step": 18
},
{
"epoch": 0.08131241084165478,
"grad_norm": 25.110597610473633,
"learning_rate": 9.500000000000001e-07,
"loss": 4.1411,
"step": 19
},
{
"epoch": 0.08559201141226819,
"grad_norm": 23.737529754638672,
"learning_rate": 1.0000000000000002e-06,
"loss": 4.0639,
"step": 20
},
{
"epoch": 0.0898716119828816,
"grad_norm": 22.502748489379883,
"learning_rate": 1.0500000000000001e-06,
"loss": 3.9744,
"step": 21
},
{
"epoch": 0.09415121255349501,
"grad_norm": 21.957761764526367,
"learning_rate": 1.1e-06,
"loss": 3.8115,
"step": 22
},
{
"epoch": 0.09843081312410841,
"grad_norm": 20.552080154418945,
"learning_rate": 1.1500000000000002e-06,
"loss": 3.6059,
"step": 23
},
{
"epoch": 0.10271041369472182,
"grad_norm": 21.490442276000977,
"learning_rate": 1.2000000000000002e-06,
"loss": 3.5285,
"step": 24
},
{
"epoch": 0.10699001426533523,
"grad_norm": 20.63368034362793,
"learning_rate": 1.25e-06,
"loss": 3.3484,
"step": 25
},
{
"epoch": 0.11126961483594865,
"grad_norm": 20.170631408691406,
"learning_rate": 1.3e-06,
"loss": 3.0973,
"step": 26
},
{
"epoch": 0.11554921540656206,
"grad_norm": 20.217880249023438,
"learning_rate": 1.3500000000000002e-06,
"loss": 2.9758,
"step": 27
},
{
"epoch": 0.11982881597717546,
"grad_norm": 18.303476333618164,
"learning_rate": 1.4000000000000001e-06,
"loss": 2.8659,
"step": 28
},
{
"epoch": 0.12410841654778887,
"grad_norm": 15.922847747802734,
"learning_rate": 1.45e-06,
"loss": 2.6369,
"step": 29
},
{
"epoch": 0.12838801711840228,
"grad_norm": 14.529817581176758,
"learning_rate": 1.5e-06,
"loss": 2.5532,
"step": 30
},
{
"epoch": 0.1326676176890157,
"grad_norm": 14.177539825439453,
"learning_rate": 1.5500000000000002e-06,
"loss": 2.4497,
"step": 31
},
{
"epoch": 0.1369472182596291,
"grad_norm": 15.144468307495117,
"learning_rate": 1.6000000000000001e-06,
"loss": 2.3432,
"step": 32
},
{
"epoch": 0.14122681883024252,
"grad_norm": 14.920907974243164,
"learning_rate": 1.6500000000000003e-06,
"loss": 2.14,
"step": 33
},
{
"epoch": 0.14550641940085593,
"grad_norm": 15.700516700744629,
"learning_rate": 1.7000000000000002e-06,
"loss": 2.0247,
"step": 34
},
{
"epoch": 0.14978601997146934,
"grad_norm": 15.600655555725098,
"learning_rate": 1.75e-06,
"loss": 1.8893,
"step": 35
},
{
"epoch": 0.15406562054208273,
"grad_norm": 15.281895637512207,
"learning_rate": 1.8000000000000001e-06,
"loss": 1.7768,
"step": 36
},
{
"epoch": 0.15834522111269614,
"grad_norm": 13.99860668182373,
"learning_rate": 1.85e-06,
"loss": 1.5486,
"step": 37
},
{
"epoch": 0.16262482168330955,
"grad_norm": 13.295161247253418,
"learning_rate": 1.9000000000000002e-06,
"loss": 1.427,
"step": 38
},
{
"epoch": 0.16690442225392296,
"grad_norm": 12.667908668518066,
"learning_rate": 1.9500000000000004e-06,
"loss": 1.2251,
"step": 39
},
{
"epoch": 0.17118402282453637,
"grad_norm": 12.730255126953125,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.1402,
"step": 40
},
{
"epoch": 0.1754636233951498,
"grad_norm": 13.135544776916504,
"learning_rate": 2.05e-06,
"loss": 0.9867,
"step": 41
},
{
"epoch": 0.1797432239657632,
"grad_norm": 12.429509162902832,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.8455,
"step": 42
},
{
"epoch": 0.1840228245363766,
"grad_norm": 11.61819076538086,
"learning_rate": 2.15e-06,
"loss": 0.6935,
"step": 43
},
{
"epoch": 0.18830242510699002,
"grad_norm": 10.11789608001709,
"learning_rate": 2.2e-06,
"loss": 0.5486,
"step": 44
},
{
"epoch": 0.19258202567760344,
"grad_norm": 8.705474853515625,
"learning_rate": 2.25e-06,
"loss": 0.4345,
"step": 45
},
{
"epoch": 0.19686162624821682,
"grad_norm": 7.407937526702881,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.3223,
"step": 46
},
{
"epoch": 0.20114122681883023,
"grad_norm": 5.553786754608154,
"learning_rate": 2.35e-06,
"loss": 0.2376,
"step": 47
},
{
"epoch": 0.20542082738944364,
"grad_norm": 3.248605966567993,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.198,
"step": 48
},
{
"epoch": 0.20970042796005706,
"grad_norm": 3.1356797218322754,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.1706,
"step": 49
},
{
"epoch": 0.21398002853067047,
"grad_norm": 2.7913248538970947,
"learning_rate": 2.5e-06,
"loss": 0.1436,
"step": 50
},
{
"epoch": 0.21825962910128388,
"grad_norm": 2.2059645652770996,
"learning_rate": 2.55e-06,
"loss": 0.144,
"step": 51
},
{
"epoch": 0.2225392296718973,
"grad_norm": 1.5722575187683105,
"learning_rate": 2.6e-06,
"loss": 0.1203,
"step": 52
},
{
"epoch": 0.2268188302425107,
"grad_norm": 1.3645284175872803,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.1309,
"step": 53
},
{
"epoch": 0.23109843081312412,
"grad_norm": 1.3658019304275513,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.1122,
"step": 54
},
{
"epoch": 0.23537803138373753,
"grad_norm": 1.1719582080841064,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.1239,
"step": 55
},
{
"epoch": 0.2396576319543509,
"grad_norm": 1.030941367149353,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.1124,
"step": 56
},
{
"epoch": 0.24393723252496433,
"grad_norm": 0.7212232947349548,
"learning_rate": 2.85e-06,
"loss": 0.0848,
"step": 57
},
{
"epoch": 0.24821683309557774,
"grad_norm": 0.8292589783668518,
"learning_rate": 2.9e-06,
"loss": 0.0976,
"step": 58
},
{
"epoch": 0.2524964336661912,
"grad_norm": 1.0893069505691528,
"learning_rate": 2.95e-06,
"loss": 0.0954,
"step": 59
},
{
"epoch": 0.25677603423680456,
"grad_norm": 0.8184618353843689,
"learning_rate": 3e-06,
"loss": 0.1029,
"step": 60
},
{
"epoch": 0.26105563480741795,
"grad_norm": 0.551844596862793,
"learning_rate": 3.05e-06,
"loss": 0.0846,
"step": 61
},
{
"epoch": 0.2653352353780314,
"grad_norm": 0.6214303970336914,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.0855,
"step": 62
},
{
"epoch": 0.26961483594864477,
"grad_norm": 0.6127054691314697,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.0858,
"step": 63
},
{
"epoch": 0.2738944365192582,
"grad_norm": 0.6121392250061035,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.0913,
"step": 64
},
{
"epoch": 0.2781740370898716,
"grad_norm": 0.5743774771690369,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.0813,
"step": 65
},
{
"epoch": 0.28245363766048504,
"grad_norm": 0.6720462441444397,
"learning_rate": 3.3000000000000006e-06,
"loss": 0.0871,
"step": 66
},
{
"epoch": 0.2867332382310984,
"grad_norm": 0.5387166142463684,
"learning_rate": 3.3500000000000005e-06,
"loss": 0.0722,
"step": 67
},
{
"epoch": 0.29101283880171186,
"grad_norm": 0.4062807261943817,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.0699,
"step": 68
},
{
"epoch": 0.29529243937232524,
"grad_norm": 0.43954649567604065,
"learning_rate": 3.45e-06,
"loss": 0.0808,
"step": 69
},
{
"epoch": 0.2995720399429387,
"grad_norm": 0.5136083960533142,
"learning_rate": 3.5e-06,
"loss": 0.0782,
"step": 70
},
{
"epoch": 0.30385164051355207,
"grad_norm": 0.43194347620010376,
"learning_rate": 3.5500000000000003e-06,
"loss": 0.0883,
"step": 71
},
{
"epoch": 0.30813124108416545,
"grad_norm": 0.42371511459350586,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0875,
"step": 72
},
{
"epoch": 0.3124108416547789,
"grad_norm": 0.4384445250034332,
"learning_rate": 3.65e-06,
"loss": 0.0842,
"step": 73
},
{
"epoch": 0.3166904422253923,
"grad_norm": 0.3909939229488373,
"learning_rate": 3.7e-06,
"loss": 0.0718,
"step": 74
},
{
"epoch": 0.3209700427960057,
"grad_norm": 0.33483919501304626,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0766,
"step": 75
},
{
"epoch": 0.3252496433666191,
"grad_norm": 0.6500667929649353,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.0841,
"step": 76
},
{
"epoch": 0.32952924393723254,
"grad_norm": 0.42915841937065125,
"learning_rate": 3.85e-06,
"loss": 0.0743,
"step": 77
},
{
"epoch": 0.3338088445078459,
"grad_norm": 0.5390797853469849,
"learning_rate": 3.900000000000001e-06,
"loss": 0.0784,
"step": 78
},
{
"epoch": 0.33808844507845937,
"grad_norm": 0.4562499225139618,
"learning_rate": 3.95e-06,
"loss": 0.0768,
"step": 79
},
{
"epoch": 0.34236804564907275,
"grad_norm": 0.45602527260780334,
"learning_rate": 4.000000000000001e-06,
"loss": 0.076,
"step": 80
},
{
"epoch": 0.3466476462196862,
"grad_norm": 0.3802862763404846,
"learning_rate": 4.05e-06,
"loss": 0.0776,
"step": 81
},
{
"epoch": 0.3509272467902996,
"grad_norm": 0.3189656138420105,
"learning_rate": 4.1e-06,
"loss": 0.0695,
"step": 82
},
{
"epoch": 0.35520684736091296,
"grad_norm": 0.37464478611946106,
"learning_rate": 4.15e-06,
"loss": 0.0732,
"step": 83
},
{
"epoch": 0.3594864479315264,
"grad_norm": 0.5200878977775574,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.0646,
"step": 84
},
{
"epoch": 0.3637660485021398,
"grad_norm": 0.3902634382247925,
"learning_rate": 4.25e-06,
"loss": 0.0751,
"step": 85
},
{
"epoch": 0.3680456490727532,
"grad_norm": 0.4341444969177246,
"learning_rate": 4.3e-06,
"loss": 0.0739,
"step": 86
},
{
"epoch": 0.3723252496433666,
"grad_norm": 0.3288861811161041,
"learning_rate": 4.350000000000001e-06,
"loss": 0.0631,
"step": 87
},
{
"epoch": 0.37660485021398005,
"grad_norm": 0.33849674463272095,
"learning_rate": 4.4e-06,
"loss": 0.0663,
"step": 88
},
{
"epoch": 0.38088445078459343,
"grad_norm": 0.36165380477905273,
"learning_rate": 4.450000000000001e-06,
"loss": 0.0618,
"step": 89
},
{
"epoch": 0.38516405135520687,
"grad_norm": 0.5453753471374512,
"learning_rate": 4.5e-06,
"loss": 0.0734,
"step": 90
},
{
"epoch": 0.38944365192582026,
"grad_norm": 0.4055081605911255,
"learning_rate": 4.5500000000000005e-06,
"loss": 0.0684,
"step": 91
},
{
"epoch": 0.39372325249643364,
"grad_norm": 0.4177473783493042,
"learning_rate": 4.600000000000001e-06,
"loss": 0.0605,
"step": 92
},
{
"epoch": 0.3980028530670471,
"grad_norm": 0.35140708088874817,
"learning_rate": 4.65e-06,
"loss": 0.0695,
"step": 93
},
{
"epoch": 0.40228245363766046,
"grad_norm": 0.5472511053085327,
"learning_rate": 4.7e-06,
"loss": 0.0743,
"step": 94
},
{
"epoch": 0.4065620542082739,
"grad_norm": 0.35946714878082275,
"learning_rate": 4.75e-06,
"loss": 0.0658,
"step": 95
},
{
"epoch": 0.4108416547788873,
"grad_norm": 0.3542482852935791,
"learning_rate": 4.800000000000001e-06,
"loss": 0.0707,
"step": 96
},
{
"epoch": 0.41512125534950073,
"grad_norm": 0.5081551671028137,
"learning_rate": 4.85e-06,
"loss": 0.0774,
"step": 97
},
{
"epoch": 0.4194008559201141,
"grad_norm": 0.3265087902545929,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.0715,
"step": 98
},
{
"epoch": 0.42368045649072755,
"grad_norm": 0.4452495574951172,
"learning_rate": 4.95e-06,
"loss": 0.064,
"step": 99
},
{
"epoch": 0.42796005706134094,
"grad_norm": 0.33817920088768005,
"learning_rate": 5e-06,
"loss": 0.0674,
"step": 100
},
{
"epoch": 0.4322396576319544,
"grad_norm": 0.4477202296257019,
"learning_rate": 4.9999926774868305e-06,
"loss": 0.0701,
"step": 101
},
{
"epoch": 0.43651925820256776,
"grad_norm": 0.3426607847213745,
"learning_rate": 4.999970709990216e-06,
"loss": 0.0637,
"step": 102
},
{
"epoch": 0.44079885877318115,
"grad_norm": 0.4300517737865448,
"learning_rate": 4.999934097638842e-06,
"loss": 0.0716,
"step": 103
},
{
"epoch": 0.4450784593437946,
"grad_norm": 0.29154735803604126,
"learning_rate": 4.9998828406471855e-06,
"loss": 0.066,
"step": 104
},
{
"epoch": 0.44935805991440797,
"grad_norm": 0.38544416427612305,
"learning_rate": 4.99981693931551e-06,
"loss": 0.0735,
"step": 105
},
{
"epoch": 0.4536376604850214,
"grad_norm": 0.42879173159599304,
"learning_rate": 4.999736394029866e-06,
"loss": 0.0724,
"step": 106
},
{
"epoch": 0.4579172610556348,
"grad_norm": 0.45247069001197815,
"learning_rate": 4.999641205262088e-06,
"loss": 0.0624,
"step": 107
},
{
"epoch": 0.46219686162624823,
"grad_norm": 0.45127418637275696,
"learning_rate": 4.9995313735697935e-06,
"loss": 0.0635,
"step": 108
},
{
"epoch": 0.4664764621968616,
"grad_norm": 0.4134274125099182,
"learning_rate": 4.999406899596378e-06,
"loss": 0.0654,
"step": 109
},
{
"epoch": 0.47075606276747506,
"grad_norm": 0.5010129809379578,
"learning_rate": 4.99926778407101e-06,
"loss": 0.0678,
"step": 110
},
{
"epoch": 0.47503566333808844,
"grad_norm": 0.7583813667297363,
"learning_rate": 4.999114027808632e-06,
"loss": 0.0729,
"step": 111
},
{
"epoch": 0.4793152639087018,
"grad_norm": 0.39662906527519226,
"learning_rate": 4.998945631709948e-06,
"loss": 0.0688,
"step": 112
},
{
"epoch": 0.48359486447931527,
"grad_norm": 0.33845070004463196,
"learning_rate": 4.998762596761424e-06,
"loss": 0.0689,
"step": 113
},
{
"epoch": 0.48787446504992865,
"grad_norm": 0.6589358448982239,
"learning_rate": 4.998564924035282e-06,
"loss": 0.0664,
"step": 114
},
{
"epoch": 0.4921540656205421,
"grad_norm": 0.4025883078575134,
"learning_rate": 4.99835261468949e-06,
"loss": 0.0731,
"step": 115
},
{
"epoch": 0.4964336661911555,
"grad_norm": 0.32868894934654236,
"learning_rate": 4.998125669967758e-06,
"loss": 0.0657,
"step": 116
},
{
"epoch": 0.5007132667617689,
"grad_norm": 0.3705316483974457,
"learning_rate": 4.997884091199531e-06,
"loss": 0.0668,
"step": 117
},
{
"epoch": 0.5049928673323824,
"grad_norm": 0.32613426446914673,
"learning_rate": 4.997627879799981e-06,
"loss": 0.0654,
"step": 118
},
{
"epoch": 0.5092724679029957,
"grad_norm": 0.42335274815559387,
"learning_rate": 4.997357037269996e-06,
"loss": 0.0695,
"step": 119
},
{
"epoch": 0.5135520684736091,
"grad_norm": 0.39254000782966614,
"learning_rate": 4.997071565196175e-06,
"loss": 0.0663,
"step": 120
},
{
"epoch": 0.5178316690442225,
"grad_norm": 0.33641666173934937,
"learning_rate": 4.996771465250814e-06,
"loss": 0.0626,
"step": 121
},
{
"epoch": 0.5221112696148359,
"grad_norm": 0.38578540086746216,
"learning_rate": 4.996456739191905e-06,
"loss": 0.064,
"step": 122
},
{
"epoch": 0.5263908701854494,
"grad_norm": 0.31561926007270813,
"learning_rate": 4.996127388863116e-06,
"loss": 0.0626,
"step": 123
},
{
"epoch": 0.5306704707560628,
"grad_norm": 0.27469775080680847,
"learning_rate": 4.995783416193782e-06,
"loss": 0.0617,
"step": 124
},
{
"epoch": 0.5349500713266762,
"grad_norm": 0.2643572986125946,
"learning_rate": 4.9954248231989016e-06,
"loss": 0.0592,
"step": 125
},
{
"epoch": 0.5392296718972895,
"grad_norm": 0.32749131321907043,
"learning_rate": 4.995051611979115e-06,
"loss": 0.0597,
"step": 126
},
{
"epoch": 0.543509272467903,
"grad_norm": 0.4380977153778076,
"learning_rate": 4.994663784720698e-06,
"loss": 0.0663,
"step": 127
},
{
"epoch": 0.5477888730385164,
"grad_norm": 0.30976516008377075,
"learning_rate": 4.994261343695546e-06,
"loss": 0.0634,
"step": 128
},
{
"epoch": 0.5520684736091298,
"grad_norm": 0.4299394488334656,
"learning_rate": 4.9938442912611625e-06,
"loss": 0.065,
"step": 129
},
{
"epoch": 0.5563480741797432,
"grad_norm": 0.3520357012748718,
"learning_rate": 4.993412629860646e-06,
"loss": 0.059,
"step": 130
},
{
"epoch": 0.5606276747503567,
"grad_norm": 0.44977450370788574,
"learning_rate": 4.9929663620226734e-06,
"loss": 0.0639,
"step": 131
},
{
"epoch": 0.5649072753209701,
"grad_norm": 0.3896942436695099,
"learning_rate": 4.992505490361487e-06,
"loss": 0.0608,
"step": 132
},
{
"epoch": 0.5691868758915835,
"grad_norm": 0.3307473063468933,
"learning_rate": 4.992030017576876e-06,
"loss": 0.0649,
"step": 133
},
{
"epoch": 0.5734664764621968,
"grad_norm": 0.27266785502433777,
"learning_rate": 4.991539946454166e-06,
"loss": 0.058,
"step": 134
},
{
"epoch": 0.5777460770328102,
"grad_norm": 0.43257489800453186,
"learning_rate": 4.991035279864199e-06,
"loss": 0.0651,
"step": 135
},
{
"epoch": 0.5820256776034237,
"grad_norm": 0.33029407262802124,
"learning_rate": 4.990516020763317e-06,
"loss": 0.0601,
"step": 136
},
{
"epoch": 0.5863052781740371,
"grad_norm": 0.40888652205467224,
"learning_rate": 4.989982172193346e-06,
"loss": 0.0676,
"step": 137
},
{
"epoch": 0.5905848787446505,
"grad_norm": 0.2740240693092346,
"learning_rate": 4.989433737281576e-06,
"loss": 0.0599,
"step": 138
},
{
"epoch": 0.5948644793152639,
"grad_norm": 0.3733709156513214,
"learning_rate": 4.988870719240744e-06,
"loss": 0.0571,
"step": 139
},
{
"epoch": 0.5991440798858774,
"grad_norm": 0.33992213010787964,
"learning_rate": 4.988293121369016e-06,
"loss": 0.0615,
"step": 140
},
{
"epoch": 0.6034236804564908,
"grad_norm": 0.34187227487564087,
"learning_rate": 4.987700947049966e-06,
"loss": 0.0625,
"step": 141
},
{
"epoch": 0.6077032810271041,
"grad_norm": 0.35290244221687317,
"learning_rate": 4.987094199752558e-06,
"loss": 0.0606,
"step": 142
},
{
"epoch": 0.6119828815977175,
"grad_norm": 0.2595992982387543,
"learning_rate": 4.986472883031124e-06,
"loss": 0.0551,
"step": 143
},
{
"epoch": 0.6162624821683309,
"grad_norm": 0.43302714824676514,
"learning_rate": 4.9858370005253435e-06,
"loss": 0.0643,
"step": 144
},
{
"epoch": 0.6205420827389444,
"grad_norm": 0.3430291414260864,
"learning_rate": 4.985186555960223e-06,
"loss": 0.0576,
"step": 145
},
{
"epoch": 0.6248216833095578,
"grad_norm": 0.5924923419952393,
"learning_rate": 4.984521553146074e-06,
"loss": 0.0679,
"step": 146
},
{
"epoch": 0.6291012838801712,
"grad_norm": 0.3352043330669403,
"learning_rate": 4.98384199597849e-06,
"loss": 0.0636,
"step": 147
},
{
"epoch": 0.6333808844507846,
"grad_norm": 0.31009000539779663,
"learning_rate": 4.983147888438324e-06,
"loss": 0.0499,
"step": 148
},
{
"epoch": 0.637660485021398,
"grad_norm": 0.41210371255874634,
"learning_rate": 4.982439234591665e-06,
"loss": 0.0632,
"step": 149
},
{
"epoch": 0.6419400855920114,
"grad_norm": 0.30768534541130066,
"learning_rate": 4.9817160385898145e-06,
"loss": 0.0669,
"step": 150
},
{
"epoch": 0.6462196861626248,
"grad_norm": 0.4492046535015106,
"learning_rate": 4.980978304669263e-06,
"loss": 0.0603,
"step": 151
},
{
"epoch": 0.6504992867332382,
"grad_norm": 0.3134528696537018,
"learning_rate": 4.9802260371516635e-06,
"loss": 0.0553,
"step": 152
},
{
"epoch": 0.6547788873038516,
"grad_norm": 0.33358660340309143,
"learning_rate": 4.979459240443806e-06,
"loss": 0.061,
"step": 153
},
{
"epoch": 0.6590584878744651,
"grad_norm": 0.2705594301223755,
"learning_rate": 4.978677919037594e-06,
"loss": 0.0525,
"step": 154
},
{
"epoch": 0.6633380884450785,
"grad_norm": 0.4429916739463806,
"learning_rate": 4.977882077510018e-06,
"loss": 0.0595,
"step": 155
},
{
"epoch": 0.6676176890156919,
"grad_norm": 0.37663739919662476,
"learning_rate": 4.977071720523125e-06,
"loss": 0.0596,
"step": 156
},
{
"epoch": 0.6718972895863052,
"grad_norm": 0.5795411467552185,
"learning_rate": 4.9762468528239945e-06,
"loss": 0.0629,
"step": 157
},
{
"epoch": 0.6761768901569187,
"grad_norm": 0.2995544373989105,
"learning_rate": 4.975407479244711e-06,
"loss": 0.0577,
"step": 158
},
{
"epoch": 0.6804564907275321,
"grad_norm": 0.2688761055469513,
"learning_rate": 4.974553604702332e-06,
"loss": 0.0612,
"step": 159
},
{
"epoch": 0.6847360912981455,
"grad_norm": 0.32369598746299744,
"learning_rate": 4.9736852341988655e-06,
"loss": 0.054,
"step": 160
},
{
"epoch": 0.6890156918687589,
"grad_norm": 0.24816952645778656,
"learning_rate": 4.972802372821235e-06,
"loss": 0.0631,
"step": 161
},
{
"epoch": 0.6932952924393724,
"grad_norm": 0.33390095829963684,
"learning_rate": 4.97190502574125e-06,
"loss": 0.0629,
"step": 162
},
{
"epoch": 0.6975748930099858,
"grad_norm": 0.2538573443889618,
"learning_rate": 4.9709931982155805e-06,
"loss": 0.0585,
"step": 163
},
{
"epoch": 0.7018544935805991,
"grad_norm": 0.337729275226593,
"learning_rate": 4.970066895585722e-06,
"loss": 0.0534,
"step": 164
},
{
"epoch": 0.7061340941512125,
"grad_norm": 0.3311474621295929,
"learning_rate": 4.9691261232779644e-06,
"loss": 0.0592,
"step": 165
},
{
"epoch": 0.7104136947218259,
"grad_norm": 0.4538433849811554,
"learning_rate": 4.968170886803361e-06,
"loss": 0.0607,
"step": 166
},
{
"epoch": 0.7146932952924394,
"grad_norm": 0.3233712911605835,
"learning_rate": 4.9672011917577e-06,
"loss": 0.0557,
"step": 167
},
{
"epoch": 0.7189728958630528,
"grad_norm": 0.41257205605506897,
"learning_rate": 4.966217043821462e-06,
"loss": 0.0618,
"step": 168
},
{
"epoch": 0.7232524964336662,
"grad_norm": 0.36823365092277527,
"learning_rate": 4.965218448759797e-06,
"loss": 0.0619,
"step": 169
},
{
"epoch": 0.7275320970042796,
"grad_norm": 0.38311564922332764,
"learning_rate": 4.964205412422487e-06,
"loss": 0.0668,
"step": 170
},
{
"epoch": 0.7318116975748931,
"grad_norm": 0.31038492918014526,
"learning_rate": 4.963177940743908e-06,
"loss": 0.0573,
"step": 171
},
{
"epoch": 0.7360912981455064,
"grad_norm": 0.2835666835308075,
"learning_rate": 4.962136039742998e-06,
"loss": 0.0649,
"step": 172
},
{
"epoch": 0.7403708987161198,
"grad_norm": 0.4525282680988312,
"learning_rate": 4.961079715523228e-06,
"loss": 0.0593,
"step": 173
},
{
"epoch": 0.7446504992867332,
"grad_norm": 0.33394986391067505,
"learning_rate": 4.9600089742725545e-06,
"loss": 0.0601,
"step": 174
},
{
"epoch": 0.7489300998573466,
"grad_norm": 0.2642901837825775,
"learning_rate": 4.95892382226339e-06,
"loss": 0.0623,
"step": 175
},
{
"epoch": 0.7532097004279601,
"grad_norm": 0.34347841143608093,
"learning_rate": 4.95782426585257e-06,
"loss": 0.0568,
"step": 176
},
{
"epoch": 0.7574893009985735,
"grad_norm": 0.30288106203079224,
"learning_rate": 4.956710311481303e-06,
"loss": 0.0569,
"step": 177
},
{
"epoch": 0.7617689015691869,
"grad_norm": 0.285087913274765,
"learning_rate": 4.955581965675148e-06,
"loss": 0.0534,
"step": 178
},
{
"epoch": 0.7660485021398002,
"grad_norm": 0.2861518859863281,
"learning_rate": 4.954439235043966e-06,
"loss": 0.0536,
"step": 179
},
{
"epoch": 0.7703281027104137,
"grad_norm": 0.4016513526439667,
"learning_rate": 4.953282126281884e-06,
"loss": 0.0575,
"step": 180
},
{
"epoch": 0.7746077032810271,
"grad_norm": 0.4838986098766327,
"learning_rate": 4.9521106461672605e-06,
"loss": 0.0666,
"step": 181
},
{
"epoch": 0.7788873038516405,
"grad_norm": 0.270673006772995,
"learning_rate": 4.950924801562636e-06,
"loss": 0.0579,
"step": 182
},
{
"epoch": 0.7831669044222539,
"grad_norm": 0.31113702058792114,
"learning_rate": 4.9497245994147e-06,
"loss": 0.0546,
"step": 183
},
{
"epoch": 0.7874465049928673,
"grad_norm": 0.2960825264453888,
"learning_rate": 4.948510046754251e-06,
"loss": 0.0553,
"step": 184
},
{
"epoch": 0.7917261055634808,
"grad_norm": 0.3720844089984894,
"learning_rate": 4.947281150696151e-06,
"loss": 0.0607,
"step": 185
},
{
"epoch": 0.7960057061340942,
"grad_norm": 0.2569741904735565,
"learning_rate": 4.946037918439285e-06,
"loss": 0.0552,
"step": 186
},
{
"epoch": 0.8002853067047075,
"grad_norm": 0.3115193843841553,
"learning_rate": 4.944780357266522e-06,
"loss": 0.0544,
"step": 187
},
{
"epoch": 0.8045649072753209,
"grad_norm": 0.2828395366668701,
"learning_rate": 4.943508474544667e-06,
"loss": 0.0541,
"step": 188
},
{
"epoch": 0.8088445078459344,
"grad_norm": 0.29760900139808655,
"learning_rate": 4.942222277724424e-06,
"loss": 0.0576,
"step": 189
},
{
"epoch": 0.8131241084165478,
"grad_norm": 0.3851959705352783,
"learning_rate": 4.940921774340347e-06,
"loss": 0.059,
"step": 190
},
{
"epoch": 0.8174037089871612,
"grad_norm": 0.35299891233444214,
"learning_rate": 4.939606972010798e-06,
"loss": 0.0627,
"step": 191
},
{
"epoch": 0.8216833095577746,
"grad_norm": 0.30920645594596863,
"learning_rate": 4.9382778784379036e-06,
"loss": 0.053,
"step": 192
},
{
"epoch": 0.8259629101283881,
"grad_norm": 0.31357741355895996,
"learning_rate": 4.936934501407507e-06,
"loss": 0.0523,
"step": 193
},
{
"epoch": 0.8302425106990015,
"grad_norm": 0.28295785188674927,
"learning_rate": 4.935576848789127e-06,
"loss": 0.0521,
"step": 194
},
{
"epoch": 0.8345221112696148,
"grad_norm": 0.3303717076778412,
"learning_rate": 4.934204928535905e-06,
"loss": 0.0564,
"step": 195
},
{
"epoch": 0.8388017118402282,
"grad_norm": 0.31010597944259644,
"learning_rate": 4.932818748684565e-06,
"loss": 0.0495,
"step": 196
},
{
"epoch": 0.8430813124108416,
"grad_norm": 0.33654817938804626,
"learning_rate": 4.931418317355362e-06,
"loss": 0.0536,
"step": 197
},
{
"epoch": 0.8473609129814551,
"grad_norm": 0.35488635301589966,
"learning_rate": 4.9300036427520396e-06,
"loss": 0.054,
"step": 198
},
{
"epoch": 0.8516405135520685,
"grad_norm": 0.37233811616897583,
"learning_rate": 4.928574733161775e-06,
"loss": 0.0527,
"step": 199
},
{
"epoch": 0.8559201141226819,
"grad_norm": 0.3253094553947449,
"learning_rate": 4.927131596955137e-06,
"loss": 0.0545,
"step": 200
},
{
"epoch": 0.8601997146932953,
"grad_norm": 0.345102995634079,
"learning_rate": 4.92567424258603e-06,
"loss": 0.0606,
"step": 201
},
{
"epoch": 0.8644793152639088,
"grad_norm": 0.32971855998039246,
"learning_rate": 4.924202678591655e-06,
"loss": 0.0542,
"step": 202
},
{
"epoch": 0.8687589158345221,
"grad_norm": 0.30636993050575256,
"learning_rate": 4.922716913592447e-06,
"loss": 0.0509,
"step": 203
},
{
"epoch": 0.8730385164051355,
"grad_norm": 0.27384838461875916,
"learning_rate": 4.921216956292033e-06,
"loss": 0.0496,
"step": 204
},
{
"epoch": 0.8773181169757489,
"grad_norm": 0.30968862771987915,
"learning_rate": 4.919702815477179e-06,
"loss": 0.0608,
"step": 205
},
{
"epoch": 0.8815977175463623,
"grad_norm": 0.25974956154823303,
"learning_rate": 4.918174500017739e-06,
"loss": 0.0543,
"step": 206
},
{
"epoch": 0.8858773181169758,
"grad_norm": 0.30472415685653687,
"learning_rate": 4.916632018866599e-06,
"loss": 0.0567,
"step": 207
},
{
"epoch": 0.8901569186875892,
"grad_norm": 0.3091549277305603,
"learning_rate": 4.91507538105963e-06,
"loss": 0.0511,
"step": 208
},
{
"epoch": 0.8944365192582026,
"grad_norm": 0.26851367950439453,
"learning_rate": 4.9135045957156356e-06,
"loss": 0.0536,
"step": 209
},
{
"epoch": 0.8987161198288159,
"grad_norm": 0.25963643193244934,
"learning_rate": 4.911919672036291e-06,
"loss": 0.0509,
"step": 210
},
{
"epoch": 0.9029957203994294,
"grad_norm": 0.28279975056648254,
"learning_rate": 4.910320619306094e-06,
"loss": 0.0538,
"step": 211
},
{
"epoch": 0.9072753209700428,
"grad_norm": 0.26387840509414673,
"learning_rate": 4.908707446892315e-06,
"loss": 0.0504,
"step": 212
},
{
"epoch": 0.9115549215406562,
"grad_norm": 0.2996926009654999,
"learning_rate": 4.907080164244934e-06,
"loss": 0.0497,
"step": 213
},
{
"epoch": 0.9158345221112696,
"grad_norm": 0.4072028398513794,
"learning_rate": 4.905438780896589e-06,
"loss": 0.053,
"step": 214
},
{
"epoch": 0.920114122681883,
"grad_norm": 0.5946587324142456,
"learning_rate": 4.903783306462522e-06,
"loss": 0.0577,
"step": 215
},
{
"epoch": 0.9243937232524965,
"grad_norm": 0.34546515345573425,
"learning_rate": 4.9021137506405205e-06,
"loss": 0.0554,
"step": 216
},
{
"epoch": 0.9286733238231099,
"grad_norm": 0.3417337238788605,
"learning_rate": 4.9004301232108585e-06,
"loss": 0.0529,
"step": 217
},
{
"epoch": 0.9329529243937232,
"grad_norm": 0.31937581300735474,
"learning_rate": 4.8987324340362445e-06,
"loss": 0.0536,
"step": 218
},
{
"epoch": 0.9372325249643366,
"grad_norm": 0.28485214710235596,
"learning_rate": 4.897020693061758e-06,
"loss": 0.0488,
"step": 219
},
{
"epoch": 0.9415121255349501,
"grad_norm": 0.3833935558795929,
"learning_rate": 4.895294910314797e-06,
"loss": 0.0577,
"step": 220
},
{
"epoch": 0.9457917261055635,
"grad_norm": 0.2772054076194763,
"learning_rate": 4.8935550959050135e-06,
"loss": 0.0506,
"step": 221
},
{
"epoch": 0.9500713266761769,
"grad_norm": 0.35764405131340027,
"learning_rate": 4.891801260024262e-06,
"loss": 0.0518,
"step": 222
},
{
"epoch": 0.9543509272467903,
"grad_norm": 0.30019721388816833,
"learning_rate": 4.890033412946527e-06,
"loss": 0.0525,
"step": 223
},
{
"epoch": 0.9586305278174037,
"grad_norm": 0.3105527460575104,
"learning_rate": 4.888251565027879e-06,
"loss": 0.0552,
"step": 224
},
{
"epoch": 0.9629101283880172,
"grad_norm": 0.35943159461021423,
"learning_rate": 4.8864557267064e-06,
"loss": 0.0543,
"step": 225
},
{
"epoch": 0.9671897289586305,
"grad_norm": 0.3295605480670929,
"learning_rate": 4.88464590850213e-06,
"loss": 0.0565,
"step": 226
},
{
"epoch": 0.9714693295292439,
"grad_norm": 0.31729599833488464,
"learning_rate": 4.882822121017004e-06,
"loss": 0.0524,
"step": 227
},
{
"epoch": 0.9757489300998573,
"grad_norm": 0.4707350432872772,
"learning_rate": 4.880984374934788e-06,
"loss": 0.0589,
"step": 228
},
{
"epoch": 0.9800285306704708,
"grad_norm": 0.35285064578056335,
"learning_rate": 4.879132681021017e-06,
"loss": 0.0509,
"step": 229
},
{
"epoch": 0.9843081312410842,
"grad_norm": 0.32979413866996765,
"learning_rate": 4.877267050122934e-06,
"loss": 0.051,
"step": 230
},
{
"epoch": 0.9885877318116976,
"grad_norm": 0.3415399491786957,
"learning_rate": 4.8753874931694254e-06,
"loss": 0.0507,
"step": 231
},
{
"epoch": 0.992867332382311,
"grad_norm": 0.3240598738193512,
"learning_rate": 4.8734940211709535e-06,
"loss": 0.0524,
"step": 232
},
{
"epoch": 0.9971469329529244,
"grad_norm": 0.31305283308029175,
"learning_rate": 4.871586645219499e-06,
"loss": 0.0528,
"step": 233
}
],
"logging_steps": 1,
"max_steps": 1398,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 233,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.219533199339684e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}