{
"best_global_step": 7028,
"best_metric": 0.767158567905426,
"best_model_checkpoint": "./mcqa_qwen3_letter/checkpoint-7028",
"epoch": 0.9998932953939178,
"eval_steps": 500,
"global_step": 7028,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007113640405477503,
"grad_norm": 20.374778747558594,
"learning_rate": 1.337126600284495e-06,
"loss": 2.342,
"step": 50
},
{
"epoch": 0.014227280810955006,
"grad_norm": 23.720001220703125,
"learning_rate": 2.7596017069701283e-06,
"loss": 1.6092,
"step": 100
},
{
"epoch": 0.02134092121643251,
"grad_norm": 14.003034591674805,
"learning_rate": 4.182076813655762e-06,
"loss": 1.4564,
"step": 150
},
{
"epoch": 0.028454561621910013,
"grad_norm": 11.145376205444336,
"learning_rate": 5.604551920341395e-06,
"loss": 1.481,
"step": 200
},
{
"epoch": 0.035568202027387516,
"grad_norm": 14.064179420471191,
"learning_rate": 7.027027027027028e-06,
"loss": 1.4483,
"step": 250
},
{
"epoch": 0.04268184243286502,
"grad_norm": 14.6947021484375,
"learning_rate": 8.44950213371266e-06,
"loss": 1.4355,
"step": 300
},
{
"epoch": 0.04979548283834252,
"grad_norm": 11.520552635192871,
"learning_rate": 9.871977240398294e-06,
"loss": 1.4314,
"step": 350
},
{
"epoch": 0.056909123243820026,
"grad_norm": 11.234253883361816,
"learning_rate": 1.1294452347083926e-05,
"loss": 1.3494,
"step": 400
},
{
"epoch": 0.06402276364929753,
"grad_norm": 9.99629020690918,
"learning_rate": 1.271692745376956e-05,
"loss": 1.3342,
"step": 450
},
{
"epoch": 0.07113640405477503,
"grad_norm": 8.978431701660156,
"learning_rate": 1.4139402560455193e-05,
"loss": 1.2951,
"step": 500
},
{
"epoch": 0.07825004446025254,
"grad_norm": 9.42021369934082,
"learning_rate": 1.5561877667140826e-05,
"loss": 1.3058,
"step": 550
},
{
"epoch": 0.08536368486573004,
"grad_norm": 9.690632820129395,
"learning_rate": 1.698435277382646e-05,
"loss": 1.2561,
"step": 600
},
{
"epoch": 0.09247732527120754,
"grad_norm": 10.151556015014648,
"learning_rate": 1.8406827880512092e-05,
"loss": 1.2415,
"step": 650
},
{
"epoch": 0.09959096567668504,
"grad_norm": 8.446418762207031,
"learning_rate": 1.9829302987197725e-05,
"loss": 1.3303,
"step": 700
},
{
"epoch": 0.10670460608216255,
"grad_norm": 9.451847076416016,
"learning_rate": 1.9860869565217395e-05,
"loss": 1.2735,
"step": 750
},
{
"epoch": 0.11381824648764005,
"grad_norm": 9.343132019042969,
"learning_rate": 1.9702766798418973e-05,
"loss": 1.3471,
"step": 800
},
{
"epoch": 0.12093188689311755,
"grad_norm": 9.069653511047363,
"learning_rate": 1.9544664031620554e-05,
"loss": 1.2535,
"step": 850
},
{
"epoch": 0.12804552729859506,
"grad_norm": 5.554582595825195,
"learning_rate": 1.9386561264822135e-05,
"loss": 1.2887,
"step": 900
},
{
"epoch": 0.13515916770407255,
"grad_norm": 8.713581085205078,
"learning_rate": 1.9228458498023717e-05,
"loss": 1.1681,
"step": 950
},
{
"epoch": 0.14227280810955006,
"grad_norm": 8.742165565490723,
"learning_rate": 1.9070355731225298e-05,
"loss": 1.2271,
"step": 1000
},
{
"epoch": 0.14938644851502755,
"grad_norm": 8.755561828613281,
"learning_rate": 1.891225296442688e-05,
"loss": 1.2557,
"step": 1050
},
{
"epoch": 0.15650008892050507,
"grad_norm": 7.881649017333984,
"learning_rate": 1.875415019762846e-05,
"loss": 1.1999,
"step": 1100
},
{
"epoch": 0.16361372932598256,
"grad_norm": 9.955330848693848,
"learning_rate": 1.8596047430830042e-05,
"loss": 1.1375,
"step": 1150
},
{
"epoch": 0.17072736973146008,
"grad_norm": 9.453360557556152,
"learning_rate": 1.843794466403162e-05,
"loss": 1.2173,
"step": 1200
},
{
"epoch": 0.17784101013693757,
"grad_norm": 6.806760787963867,
"learning_rate": 1.8279841897233205e-05,
"loss": 1.2116,
"step": 1250
},
{
"epoch": 0.18495465054241508,
"grad_norm": 13.604923248291016,
"learning_rate": 1.8121739130434783e-05,
"loss": 1.0788,
"step": 1300
},
{
"epoch": 0.19206829094789257,
"grad_norm": 10.025015830993652,
"learning_rate": 1.7963636363636364e-05,
"loss": 1.149,
"step": 1350
},
{
"epoch": 0.1991819313533701,
"grad_norm": 9.246174812316895,
"learning_rate": 1.7805533596837945e-05,
"loss": 1.1171,
"step": 1400
},
{
"epoch": 0.20629557175884758,
"grad_norm": 9.556463241577148,
"learning_rate": 1.7647430830039527e-05,
"loss": 1.1744,
"step": 1450
},
{
"epoch": 0.2134092121643251,
"grad_norm": 13.477895736694336,
"learning_rate": 1.7489328063241108e-05,
"loss": 1.0723,
"step": 1500
},
{
"epoch": 0.22052285256980259,
"grad_norm": 7.583223342895508,
"learning_rate": 1.733122529644269e-05,
"loss": 1.1004,
"step": 1550
},
{
"epoch": 0.2276364929752801,
"grad_norm": 9.056886672973633,
"learning_rate": 1.717312252964427e-05,
"loss": 1.1413,
"step": 1600
},
{
"epoch": 0.2347501333807576,
"grad_norm": 8.818636894226074,
"learning_rate": 1.701501976284585e-05,
"loss": 1.0248,
"step": 1650
},
{
"epoch": 0.2418637737862351,
"grad_norm": 11.717618942260742,
"learning_rate": 1.6856916996047434e-05,
"loss": 1.0386,
"step": 1700
},
{
"epoch": 0.2489774141917126,
"grad_norm": 7.414971828460693,
"learning_rate": 1.6698814229249015e-05,
"loss": 1.0885,
"step": 1750
},
{
"epoch": 0.2560910545971901,
"grad_norm": 6.960204601287842,
"learning_rate": 1.6540711462450593e-05,
"loss": 1.053,
"step": 1800
},
{
"epoch": 0.26320469500266763,
"grad_norm": 6.861100673675537,
"learning_rate": 1.6382608695652174e-05,
"loss": 1.0398,
"step": 1850
},
{
"epoch": 0.2703183354081451,
"grad_norm": 7.645370960235596,
"learning_rate": 1.6224505928853756e-05,
"loss": 0.9453,
"step": 1900
},
{
"epoch": 0.2774319758136226,
"grad_norm": 7.169046878814697,
"learning_rate": 1.6066403162055337e-05,
"loss": 0.9847,
"step": 1950
},
{
"epoch": 0.28454561621910013,
"grad_norm": 6.290506362915039,
"learning_rate": 1.5908300395256918e-05,
"loss": 0.9592,
"step": 2000
},
{
"epoch": 0.29165925662457765,
"grad_norm": 8.001459121704102,
"learning_rate": 1.57501976284585e-05,
"loss": 0.9674,
"step": 2050
},
{
"epoch": 0.2987728970300551,
"grad_norm": 8.543235778808594,
"learning_rate": 1.559525691699605e-05,
"loss": 1.003,
"step": 2100
},
{
"epoch": 0.3058865374355326,
"grad_norm": 10.702069282531738,
"learning_rate": 1.543715415019763e-05,
"loss": 0.9914,
"step": 2150
},
{
"epoch": 0.31300017784101014,
"grad_norm": 5.234061241149902,
"learning_rate": 1.527905138339921e-05,
"loss": 1.0809,
"step": 2200
},
{
"epoch": 0.32011381824648766,
"grad_norm": 8.00466251373291,
"learning_rate": 1.5120948616600793e-05,
"loss": 0.9713,
"step": 2250
},
{
"epoch": 0.3272274586519651,
"grad_norm": 7.484270095825195,
"learning_rate": 1.4962845849802372e-05,
"loss": 0.906,
"step": 2300
},
{
"epoch": 0.33434109905744264,
"grad_norm": 17.150543212890625,
"learning_rate": 1.4804743083003954e-05,
"loss": 1.0206,
"step": 2350
},
{
"epoch": 0.34145473946292015,
"grad_norm": 8.58785343170166,
"learning_rate": 1.4646640316205533e-05,
"loss": 0.9868,
"step": 2400
},
{
"epoch": 0.34856837986839767,
"grad_norm": 7.040744304656982,
"learning_rate": 1.4488537549407116e-05,
"loss": 0.9225,
"step": 2450
},
{
"epoch": 0.35568202027387513,
"grad_norm": 8.559785842895508,
"learning_rate": 1.4330434782608698e-05,
"loss": 0.9345,
"step": 2500
},
{
"epoch": 0.36279566067935265,
"grad_norm": 9.140971183776855,
"learning_rate": 1.4172332015810277e-05,
"loss": 1.004,
"step": 2550
},
{
"epoch": 0.36990930108483017,
"grad_norm": 8.379377365112305,
"learning_rate": 1.4014229249011859e-05,
"loss": 1.0199,
"step": 2600
},
{
"epoch": 0.3770229414903077,
"grad_norm": 6.753004550933838,
"learning_rate": 1.3856126482213438e-05,
"loss": 0.9463,
"step": 2650
},
{
"epoch": 0.38413658189578515,
"grad_norm": 7.884097576141357,
"learning_rate": 1.3698023715415021e-05,
"loss": 0.9367,
"step": 2700
},
{
"epoch": 0.39125022230126266,
"grad_norm": 8.405110359191895,
"learning_rate": 1.3539920948616603e-05,
"loss": 0.9036,
"step": 2750
},
{
"epoch": 0.3983638627067402,
"grad_norm": 7.3707661628723145,
"learning_rate": 1.3381818181818182e-05,
"loss": 0.9868,
"step": 2800
},
{
"epoch": 0.4054775031122177,
"grad_norm": 9.472613334655762,
"learning_rate": 1.3223715415019764e-05,
"loss": 0.9809,
"step": 2850
},
{
"epoch": 0.41259114351769516,
"grad_norm": 8.191240310668945,
"learning_rate": 1.3065612648221343e-05,
"loss": 0.9249,
"step": 2900
},
{
"epoch": 0.4197047839231727,
"grad_norm": 7.100245952606201,
"learning_rate": 1.2907509881422926e-05,
"loss": 0.9303,
"step": 2950
},
{
"epoch": 0.4268184243286502,
"grad_norm": 6.30331563949585,
"learning_rate": 1.2749407114624508e-05,
"loss": 0.9459,
"step": 3000
},
{
"epoch": 0.4339320647341277,
"grad_norm": 5.755526065826416,
"learning_rate": 1.2591304347826087e-05,
"loss": 0.8818,
"step": 3050
},
{
"epoch": 0.44104570513960517,
"grad_norm": 7.514272689819336,
"learning_rate": 1.243320158102767e-05,
"loss": 0.8817,
"step": 3100
},
{
"epoch": 0.4481593455450827,
"grad_norm": 6.345920562744141,
"learning_rate": 1.227509881422925e-05,
"loss": 0.8257,
"step": 3150
},
{
"epoch": 0.4552729859505602,
"grad_norm": 5.915317058563232,
"learning_rate": 1.2120158102766799e-05,
"loss": 0.9691,
"step": 3200
},
{
"epoch": 0.4623866263560377,
"grad_norm": 4.785401821136475,
"learning_rate": 1.1962055335968382e-05,
"loss": 0.97,
"step": 3250
},
{
"epoch": 0.4695002667615152,
"grad_norm": 8.403907775878906,
"learning_rate": 1.1803952569169962e-05,
"loss": 0.8699,
"step": 3300
},
{
"epoch": 0.4766139071669927,
"grad_norm": 8.583006858825684,
"learning_rate": 1.1645849802371543e-05,
"loss": 0.9233,
"step": 3350
},
{
"epoch": 0.4837275475724702,
"grad_norm": 7.836755752563477,
"learning_rate": 1.1487747035573123e-05,
"loss": 0.8716,
"step": 3400
},
{
"epoch": 0.49084118797794773,
"grad_norm": 4.714875221252441,
"learning_rate": 1.1329644268774704e-05,
"loss": 0.8499,
"step": 3450
},
{
"epoch": 0.4979548283834252,
"grad_norm": 7.097923755645752,
"learning_rate": 1.1171541501976287e-05,
"loss": 0.912,
"step": 3500
},
{
"epoch": 0.5050684687889028,
"grad_norm": 4.853347301483154,
"learning_rate": 1.1013438735177867e-05,
"loss": 0.8411,
"step": 3550
},
{
"epoch": 0.5121821091943802,
"grad_norm": 6.642279148101807,
"learning_rate": 1.0855335968379448e-05,
"loss": 0.9325,
"step": 3600
},
{
"epoch": 0.5192957495998577,
"grad_norm": 6.702445030212402,
"learning_rate": 1.0697233201581028e-05,
"loss": 0.8354,
"step": 3650
},
{
"epoch": 0.5264093900053353,
"grad_norm": 6.634207725524902,
"learning_rate": 1.0539130434782609e-05,
"loss": 0.8813,
"step": 3700
},
{
"epoch": 0.5335230304108127,
"grad_norm": 4.122372627258301,
"learning_rate": 1.0381027667984192e-05,
"loss": 0.7811,
"step": 3750
},
{
"epoch": 0.5406366708162902,
"grad_norm": 8.899633407592773,
"learning_rate": 1.0222924901185772e-05,
"loss": 0.8707,
"step": 3800
},
{
"epoch": 0.5477503112217678,
"grad_norm": 5.374997615814209,
"learning_rate": 1.0064822134387353e-05,
"loss": 0.8854,
"step": 3850
},
{
"epoch": 0.5548639516272452,
"grad_norm": 7.9275102615356445,
"learning_rate": 9.906719367588934e-06,
"loss": 0.9255,
"step": 3900
},
{
"epoch": 0.5619775920327228,
"grad_norm": 6.925314903259277,
"learning_rate": 9.748616600790514e-06,
"loss": 0.9027,
"step": 3950
},
{
"epoch": 0.5690912324382003,
"grad_norm": 9.117793083190918,
"learning_rate": 9.590513833992095e-06,
"loss": 0.8831,
"step": 4000
},
{
"epoch": 0.5762048728436777,
"grad_norm": 5.861302852630615,
"learning_rate": 9.432411067193677e-06,
"loss": 0.8329,
"step": 4050
},
{
"epoch": 0.5833185132491553,
"grad_norm": 6.011074066162109,
"learning_rate": 9.274308300395256e-06,
"loss": 0.8766,
"step": 4100
},
{
"epoch": 0.5904321536546328,
"grad_norm": 5.763038158416748,
"learning_rate": 9.11620553359684e-06,
"loss": 0.9374,
"step": 4150
},
{
"epoch": 0.5975457940601102,
"grad_norm": 5.520111083984375,
"learning_rate": 8.95810276679842e-06,
"loss": 0.8813,
"step": 4200
},
{
"epoch": 0.6046594344655878,
"grad_norm": 7.351500511169434,
"learning_rate": 8.8e-06,
"loss": 0.8677,
"step": 4250
},
{
"epoch": 0.6117730748710652,
"grad_norm": 5.451608657836914,
"learning_rate": 8.641897233201582e-06,
"loss": 0.8606,
"step": 4300
},
{
"epoch": 0.6188867152765428,
"grad_norm": 5.458895683288574,
"learning_rate": 8.483794466403163e-06,
"loss": 0.8522,
"step": 4350
},
{
"epoch": 0.6260003556820203,
"grad_norm": 5.554731369018555,
"learning_rate": 8.325691699604743e-06,
"loss": 0.9042,
"step": 4400
},
{
"epoch": 0.6331139960874977,
"grad_norm": 3.6643424034118652,
"learning_rate": 8.167588932806326e-06,
"loss": 0.8696,
"step": 4450
},
{
"epoch": 0.6402276364929753,
"grad_norm": 5.593344688415527,
"learning_rate": 8.009486166007906e-06,
"loss": 0.7663,
"step": 4500
},
{
"epoch": 0.6473412768984528,
"grad_norm": 7.597021102905273,
"learning_rate": 7.851383399209487e-06,
"loss": 0.7752,
"step": 4550
},
{
"epoch": 0.6544549173039302,
"grad_norm": 4.095972537994385,
"learning_rate": 7.693280632411068e-06,
"loss": 0.897,
"step": 4600
},
{
"epoch": 0.6615685577094078,
"grad_norm": 7.817249298095703,
"learning_rate": 7.535177865612649e-06,
"loss": 0.7448,
"step": 4650
},
{
"epoch": 0.6686821981148853,
"grad_norm": 3.296755313873291,
"learning_rate": 7.37707509881423e-06,
"loss": 0.8065,
"step": 4700
},
{
"epoch": 0.6757958385203628,
"grad_norm": 5.186858177185059,
"learning_rate": 7.218972332015811e-06,
"loss": 0.8618,
"step": 4750
},
{
"epoch": 0.6829094789258403,
"grad_norm": 6.484861373901367,
"learning_rate": 7.060869565217392e-06,
"loss": 0.8549,
"step": 4800
},
{
"epoch": 0.6900231193313178,
"grad_norm": 4.90764856338501,
"learning_rate": 6.902766798418972e-06,
"loss": 0.9441,
"step": 4850
},
{
"epoch": 0.6971367597367953,
"grad_norm": 3.6083099842071533,
"learning_rate": 6.744664031620554e-06,
"loss": 0.7467,
"step": 4900
},
{
"epoch": 0.7042504001422728,
"grad_norm": 4.79904317855835,
"learning_rate": 6.586561264822134e-06,
"loss": 0.7974,
"step": 4950
},
{
"epoch": 0.7113640405477503,
"grad_norm": 8.30154800415039,
"learning_rate": 6.4284584980237165e-06,
"loss": 0.8927,
"step": 5000
},
{
"epoch": 0.7184776809532278,
"grad_norm": 5.582029819488525,
"learning_rate": 6.270355731225297e-06,
"loss": 0.8348,
"step": 5050
},
{
"epoch": 0.7255913213587053,
"grad_norm": 7.575035572052002,
"learning_rate": 6.112252964426878e-06,
"loss": 0.8691,
"step": 5100
},
{
"epoch": 0.7327049617641829,
"grad_norm": 6.100114345550537,
"learning_rate": 5.954150197628459e-06,
"loss": 0.788,
"step": 5150
},
{
"epoch": 0.7398186021696603,
"grad_norm": 7.05925178527832,
"learning_rate": 5.796047430830039e-06,
"loss": 0.8325,
"step": 5200
},
{
"epoch": 0.7469322425751378,
"grad_norm": 6.5520920753479,
"learning_rate": 5.6379446640316215e-06,
"loss": 0.7792,
"step": 5250
},
{
"epoch": 0.7540458829806154,
"grad_norm": 6.9081807136535645,
"learning_rate": 5.479841897233202e-06,
"loss": 0.805,
"step": 5300
},
{
"epoch": 0.7611595233860928,
"grad_norm": 7.752345085144043,
"learning_rate": 5.321739130434783e-06,
"loss": 0.7998,
"step": 5350
},
{
"epoch": 0.7682731637915703,
"grad_norm": 3.8013064861297607,
"learning_rate": 5.163636363636364e-06,
"loss": 0.7748,
"step": 5400
},
{
"epoch": 0.7753868041970479,
"grad_norm": 5.316600322723389,
"learning_rate": 5.005533596837945e-06,
"loss": 0.7644,
"step": 5450
},
{
"epoch": 0.7825004446025253,
"grad_norm": 8.250207901000977,
"learning_rate": 4.8474308300395266e-06,
"loss": 0.7766,
"step": 5500
},
{
"epoch": 0.7896140850080029,
"grad_norm": 5.134970664978027,
"learning_rate": 4.689328063241107e-06,
"loss": 0.79,
"step": 5550
},
{
"epoch": 0.7967277254134804,
"grad_norm": 5.832414627075195,
"learning_rate": 4.531225296442688e-06,
"loss": 0.9154,
"step": 5600
},
{
"epoch": 0.8038413658189578,
"grad_norm": 6.164233684539795,
"learning_rate": 4.373122529644269e-06,
"loss": 0.8288,
"step": 5650
},
{
"epoch": 0.8109550062244354,
"grad_norm": 11.60009479522705,
"learning_rate": 4.21501976284585e-06,
"loss": 0.7047,
"step": 5700
},
{
"epoch": 0.8180686466299129,
"grad_norm": 3.422287702560425,
"learning_rate": 4.056916996047432e-06,
"loss": 0.7387,
"step": 5750
},
{
"epoch": 0.8251822870353903,
"grad_norm": 3.786428689956665,
"learning_rate": 3.898814229249012e-06,
"loss": 0.806,
"step": 5800
},
{
"epoch": 0.8322959274408679,
"grad_norm": 3.7693772315979004,
"learning_rate": 3.740711462450593e-06,
"loss": 0.8136,
"step": 5850
},
{
"epoch": 0.8394095678463454,
"grad_norm": 3.2537221908569336,
"learning_rate": 3.5826086956521744e-06,
"loss": 0.7834,
"step": 5900
},
{
"epoch": 0.8465232082518229,
"grad_norm": 2.938720226287842,
"learning_rate": 3.4245059288537553e-06,
"loss": 0.7607,
"step": 5950
},
{
"epoch": 0.8536368486573004,
"grad_norm": 6.700362205505371,
"learning_rate": 3.266403162055336e-06,
"loss": 0.8713,
"step": 6000
},
{
"epoch": 0.8607504890627778,
"grad_norm": 4.768782615661621,
"learning_rate": 3.1083003952569176e-06,
"loss": 0.8086,
"step": 6050
},
{
"epoch": 0.8678641294682554,
"grad_norm": 6.537806034088135,
"learning_rate": 2.950197628458498e-06,
"loss": 0.8722,
"step": 6100
},
{
"epoch": 0.8749777698737329,
"grad_norm": 6.817727088928223,
"learning_rate": 2.792094861660079e-06,
"loss": 0.7126,
"step": 6150
},
{
"epoch": 0.8820914102792103,
"grad_norm": 5.932295799255371,
"learning_rate": 2.6339920948616604e-06,
"loss": 0.778,
"step": 6200
},
{
"epoch": 0.8892050506846879,
"grad_norm": 6.475405693054199,
"learning_rate": 2.4758893280632413e-06,
"loss": 0.7812,
"step": 6250
},
{
"epoch": 0.8963186910901654,
"grad_norm": 8.57321834564209,
"learning_rate": 2.3177865612648222e-06,
"loss": 0.7654,
"step": 6300
},
{
"epoch": 0.903432331495643,
"grad_norm": 1.9723538160324097,
"learning_rate": 2.1596837944664036e-06,
"loss": 0.7701,
"step": 6350
},
{
"epoch": 0.9105459719011204,
"grad_norm": 6.585085391998291,
"learning_rate": 2.001581027667984e-06,
"loss": 0.8419,
"step": 6400
},
{
"epoch": 0.9176596123065979,
"grad_norm": 4.754767894744873,
"learning_rate": 1.8434782608695654e-06,
"loss": 0.9006,
"step": 6450
},
{
"epoch": 0.9247732527120754,
"grad_norm": 4.661940574645996,
"learning_rate": 1.6853754940711464e-06,
"loss": 0.7784,
"step": 6500
},
{
"epoch": 0.9318868931175529,
"grad_norm": 4.972583770751953,
"learning_rate": 1.5272727272727275e-06,
"loss": 0.8112,
"step": 6550
},
{
"epoch": 0.9390005335230304,
"grad_norm": 6.641800403594971,
"learning_rate": 1.3691699604743084e-06,
"loss": 0.7466,
"step": 6600
},
{
"epoch": 0.9461141739285079,
"grad_norm": 5.098995208740234,
"learning_rate": 1.2110671936758893e-06,
"loss": 0.824,
"step": 6650
},
{
"epoch": 0.9532278143339854,
"grad_norm": 7.060574054718018,
"learning_rate": 1.0529644268774705e-06,
"loss": 0.8502,
"step": 6700
},
{
"epoch": 0.960341454739463,
"grad_norm": 4.678645610809326,
"learning_rate": 8.948616600790515e-07,
"loss": 0.8004,
"step": 6750
},
{
"epoch": 0.9674550951449404,
"grad_norm": 5.895313739776611,
"learning_rate": 7.367588932806324e-07,
"loss": 0.6811,
"step": 6800
},
{
"epoch": 0.9745687355504179,
"grad_norm": 5.316034317016602,
"learning_rate": 5.786561264822135e-07,
"loss": 0.7955,
"step": 6850
},
{
"epoch": 0.9816823759558955,
"grad_norm": 5.032801628112793,
"learning_rate": 4.205533596837945e-07,
"loss": 0.8206,
"step": 6900
},
{
"epoch": 0.9887960163613729,
"grad_norm": 5.486656188964844,
"learning_rate": 2.6245059288537554e-07,
"loss": 0.795,
"step": 6950
},
{
"epoch": 0.9959096567668504,
"grad_norm": 6.862821102142334,
"learning_rate": 1.0434782608695654e-07,
"loss": 0.676,
"step": 7000
},
{
"epoch": 0.9998932953939178,
"eval_loss": 0.767158567905426,
"eval_runtime": 341.0802,
"eval_samples_per_second": 15.454,
"eval_steps_per_second": 1.932,
"step": 7028
}
],
"logging_steps": 50,
"max_steps": 7028,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.803879769007718e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}