{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 100,
"global_step": 676,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.029585798816568046,
"grad_norm": 0.6439659821018778,
"learning_rate": 1.4705882352941177e-06,
"loss": 1.1017,
"step": 5
},
{
"epoch": 0.05917159763313609,
"grad_norm": 0.5057877785583942,
"learning_rate": 2.9411764705882355e-06,
"loss": 1.1007,
"step": 10
},
{
"epoch": 0.08875739644970414,
"grad_norm": 0.34275947536219825,
"learning_rate": 4.411764705882353e-06,
"loss": 1.0629,
"step": 15
},
{
"epoch": 0.11834319526627218,
"grad_norm": 0.33577063948976177,
"learning_rate": 5.882352941176471e-06,
"loss": 1.0051,
"step": 20
},
{
"epoch": 0.14792899408284024,
"grad_norm": 0.2949471201474794,
"learning_rate": 7.352941176470589e-06,
"loss": 0.949,
"step": 25
},
{
"epoch": 0.17751479289940827,
"grad_norm": 0.21159349877560912,
"learning_rate": 8.823529411764707e-06,
"loss": 0.9046,
"step": 30
},
{
"epoch": 0.20710059171597633,
"grad_norm": 0.15856411969263493,
"learning_rate": 1.0294117647058823e-05,
"loss": 0.888,
"step": 35
},
{
"epoch": 0.23668639053254437,
"grad_norm": 0.1353999900027582,
"learning_rate": 1.1764705882352942e-05,
"loss": 0.8801,
"step": 40
},
{
"epoch": 0.26627218934911245,
"grad_norm": 0.13487701799423293,
"learning_rate": 1.323529411764706e-05,
"loss": 0.8569,
"step": 45
},
{
"epoch": 0.2958579881656805,
"grad_norm": 0.10937715780925006,
"learning_rate": 1.4705882352941179e-05,
"loss": 0.8411,
"step": 50
},
{
"epoch": 0.3254437869822485,
"grad_norm": 0.10116789827792308,
"learning_rate": 1.6176470588235296e-05,
"loss": 0.8256,
"step": 55
},
{
"epoch": 0.35502958579881655,
"grad_norm": 0.09486346656051839,
"learning_rate": 1.7647058823529414e-05,
"loss": 0.8314,
"step": 60
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.09783173154524022,
"learning_rate": 1.911764705882353e-05,
"loss": 0.8011,
"step": 65
},
{
"epoch": 0.41420118343195267,
"grad_norm": 0.09449363160697857,
"learning_rate": 1.999946602771351e-05,
"loss": 0.7999,
"step": 70
},
{
"epoch": 0.4437869822485207,
"grad_norm": 0.09797892714489181,
"learning_rate": 1.9993459494370938e-05,
"loss": 0.8084,
"step": 75
},
{
"epoch": 0.47337278106508873,
"grad_norm": 0.09083934620130703,
"learning_rate": 1.9980782984658682e-05,
"loss": 0.7872,
"step": 80
},
{
"epoch": 0.5029585798816568,
"grad_norm": 0.09674625831588454,
"learning_rate": 1.996144495931251e-05,
"loss": 0.789,
"step": 85
},
{
"epoch": 0.5325443786982249,
"grad_norm": 0.09153750269543345,
"learning_rate": 1.9935458325191365e-05,
"loss": 0.7865,
"step": 90
},
{
"epoch": 0.5621301775147929,
"grad_norm": 0.0957908326296121,
"learning_rate": 1.9902840426662897e-05,
"loss": 0.7924,
"step": 95
},
{
"epoch": 0.591715976331361,
"grad_norm": 0.09259701132768274,
"learning_rate": 1.9863613034027224e-05,
"loss": 0.7665,
"step": 100
},
{
"epoch": 0.591715976331361,
"eval_loss": 0.8009439706802368,
"eval_runtime": 1.9876,
"eval_samples_per_second": 64.399,
"eval_steps_per_second": 2.012,
"step": 100
},
{
"epoch": 0.621301775147929,
"grad_norm": 0.09861544877125988,
"learning_rate": 1.9817802328986696e-05,
"loss": 0.7689,
"step": 105
},
{
"epoch": 0.650887573964497,
"grad_norm": 0.09818067763821876,
"learning_rate": 1.9765438887171327e-05,
"loss": 0.7788,
"step": 110
},
{
"epoch": 0.6804733727810651,
"grad_norm": 0.10112115466155701,
"learning_rate": 1.970655765773159e-05,
"loss": 0.772,
"step": 115
},
{
"epoch": 0.7100591715976331,
"grad_norm": 0.0914598701096024,
"learning_rate": 1.9641197940012136e-05,
"loss": 0.7596,
"step": 120
},
{
"epoch": 0.7396449704142012,
"grad_norm": 0.10299550351836897,
"learning_rate": 1.956940335732209e-05,
"loss": 0.7608,
"step": 125
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.09676217855032584,
"learning_rate": 1.9491221827819348e-05,
"loss": 0.7608,
"step": 130
},
{
"epoch": 0.7988165680473372,
"grad_norm": 0.10054271920883129,
"learning_rate": 1.9406705532528373e-05,
"loss": 0.7625,
"step": 135
},
{
"epoch": 0.8284023668639053,
"grad_norm": 0.09316749954420439,
"learning_rate": 1.9315910880512792e-05,
"loss": 0.7526,
"step": 140
},
{
"epoch": 0.8579881656804734,
"grad_norm": 0.09924380627210336,
"learning_rate": 1.921889847122605e-05,
"loss": 0.758,
"step": 145
},
{
"epoch": 0.8875739644970414,
"grad_norm": 0.0935394258583913,
"learning_rate": 1.911573305406528e-05,
"loss": 0.7667,
"step": 150
},
{
"epoch": 0.9171597633136095,
"grad_norm": 0.0941062409493051,
"learning_rate": 1.9006483485155338e-05,
"loss": 0.7597,
"step": 155
},
{
"epoch": 0.9467455621301775,
"grad_norm": 0.0945515154197286,
"learning_rate": 1.8891222681391853e-05,
"loss": 0.7583,
"step": 160
},
{
"epoch": 0.9763313609467456,
"grad_norm": 0.0944093533101716,
"learning_rate": 1.877002757177403e-05,
"loss": 0.7402,
"step": 165
},
{
"epoch": 1.0059171597633136,
"grad_norm": 0.1026209727213443,
"learning_rate": 1.8642979046059595e-05,
"loss": 0.7451,
"step": 170
},
{
"epoch": 1.0355029585798816,
"grad_norm": 0.09253733372936898,
"learning_rate": 1.8510161900776186e-05,
"loss": 0.7227,
"step": 175
},
{
"epoch": 1.0650887573964498,
"grad_norm": 0.10701582057186095,
"learning_rate": 1.8371664782625287e-05,
"loss": 0.7235,
"step": 180
},
{
"epoch": 1.0946745562130178,
"grad_norm": 0.10901317779074045,
"learning_rate": 1.8227580129316368e-05,
"loss": 0.7297,
"step": 185
},
{
"epoch": 1.1242603550295858,
"grad_norm": 0.09272510989071414,
"learning_rate": 1.8078004107870797e-05,
"loss": 0.7269,
"step": 190
},
{
"epoch": 1.1538461538461537,
"grad_norm": 0.09753000927785783,
"learning_rate": 1.7923036550436706e-05,
"loss": 0.7225,
"step": 195
},
{
"epoch": 1.183431952662722,
"grad_norm": 0.10006006351926447,
"learning_rate": 1.7762780887657576e-05,
"loss": 0.7224,
"step": 200
},
{
"epoch": 1.183431952662722,
"eval_loss": 0.7682048082351685,
"eval_runtime": 1.9381,
"eval_samples_per_second": 66.045,
"eval_steps_per_second": 2.064,
"step": 200
},
{
"epoch": 1.21301775147929,
"grad_norm": 0.08514109288732025,
"learning_rate": 1.759734407963911e-05,
"loss": 0.7141,
"step": 205
},
{
"epoch": 1.242603550295858,
"grad_norm": 0.10153944071881804,
"learning_rate": 1.74268365445604e-05,
"loss": 0.7251,
"step": 210
},
{
"epoch": 1.272189349112426,
"grad_norm": 0.08516674487530249,
"learning_rate": 1.725137208497705e-05,
"loss": 0.7095,
"step": 215
},
{
"epoch": 1.301775147928994,
"grad_norm": 0.09000557413388141,
"learning_rate": 1.7071067811865477e-05,
"loss": 0.7221,
"step": 220
},
{
"epoch": 1.331360946745562,
"grad_norm": 0.10261984193674742,
"learning_rate": 1.688604406645903e-05,
"loss": 0.7248,
"step": 225
},
{
"epoch": 1.3609467455621302,
"grad_norm": 0.09063724453079985,
"learning_rate": 1.6696424339928153e-05,
"loss": 0.7189,
"step": 230
},
{
"epoch": 1.3905325443786982,
"grad_norm": 0.0876664358809029,
"learning_rate": 1.6502335190958135e-05,
"loss": 0.7115,
"step": 235
},
{
"epoch": 1.4201183431952662,
"grad_norm": 0.08926784554788682,
"learning_rate": 1.6303906161279554e-05,
"loss": 0.7052,
"step": 240
},
{
"epoch": 1.4497041420118344,
"grad_norm": 0.09216166902850643,
"learning_rate": 1.6101269689207656e-05,
"loss": 0.7304,
"step": 245
},
{
"epoch": 1.4792899408284024,
"grad_norm": 0.09489888582394233,
"learning_rate": 1.5894561021248535e-05,
"loss": 0.7094,
"step": 250
},
{
"epoch": 1.5088757396449703,
"grad_norm": 0.09815715101407019,
"learning_rate": 1.568391812183097e-05,
"loss": 0.7125,
"step": 255
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.11023233541754712,
"learning_rate": 1.5469481581224274e-05,
"loss": 0.7203,
"step": 260
},
{
"epoch": 1.5680473372781065,
"grad_norm": 0.0990016959835567,
"learning_rate": 1.5251394521703496e-05,
"loss": 0.7162,
"step": 265
},
{
"epoch": 1.5976331360946747,
"grad_norm": 0.09361562420520224,
"learning_rate": 1.5029802502024788e-05,
"loss": 0.7143,
"step": 270
},
{
"epoch": 1.6272189349112427,
"grad_norm": 0.09061363853326403,
"learning_rate": 1.4804853420274471e-05,
"loss": 0.7178,
"step": 275
},
{
"epoch": 1.6568047337278107,
"grad_norm": 0.09054748720794353,
"learning_rate": 1.4576697415156818e-05,
"loss": 0.7072,
"step": 280
},
{
"epoch": 1.6863905325443787,
"grad_norm": 0.09231829692501531,
"learning_rate": 1.434548676578634e-05,
"loss": 0.7154,
"step": 285
},
{
"epoch": 1.7159763313609466,
"grad_norm": 0.08703936573542412,
"learning_rate": 1.4111375790051511e-05,
"loss": 0.7011,
"step": 290
},
{
"epoch": 1.7455621301775148,
"grad_norm": 0.08968479884070231,
"learning_rate": 1.3874520741617734e-05,
"loss": 0.7122,
"step": 295
},
{
"epoch": 1.7751479289940828,
"grad_norm": 0.09437837766978704,
"learning_rate": 1.3635079705638298e-05,
"loss": 0.7017,
"step": 300
},
{
"epoch": 1.7751479289940828,
"eval_loss": 0.752805233001709,
"eval_runtime": 1.9496,
"eval_samples_per_second": 65.655,
"eval_steps_per_second": 2.052,
"step": 300
},
{
"epoch": 1.804733727810651,
"grad_norm": 0.08903936053110759,
"learning_rate": 1.3393212493242964e-05,
"loss": 0.725,
"step": 305
},
{
"epoch": 1.834319526627219,
"grad_norm": 0.08414153400589117,
"learning_rate": 1.3149080534874519e-05,
"loss": 0.7054,
"step": 310
},
{
"epoch": 1.863905325443787,
"grad_norm": 0.09439034181825776,
"learning_rate": 1.2902846772544625e-05,
"loss": 0.7021,
"step": 315
},
{
"epoch": 1.893491124260355,
"grad_norm": 0.08964239172806014,
"learning_rate": 1.2654675551080724e-05,
"loss": 0.7064,
"step": 320
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.09358273223772219,
"learning_rate": 1.2404732508436693e-05,
"loss": 0.6996,
"step": 325
},
{
"epoch": 1.952662721893491,
"grad_norm": 0.08800666939014279,
"learning_rate": 1.2153184465140413e-05,
"loss": 0.7137,
"step": 330
},
{
"epoch": 1.9822485207100593,
"grad_norm": 0.09777074659138213,
"learning_rate": 1.1900199312952047e-05,
"loss": 0.7061,
"step": 335
},
{
"epoch": 2.0118343195266273,
"grad_norm": 0.08420032391185793,
"learning_rate": 1.164594590280734e-05,
"loss": 0.6848,
"step": 340
},
{
"epoch": 2.0414201183431953,
"grad_norm": 0.09709124491718055,
"learning_rate": 1.1390593932120742e-05,
"loss": 0.6797,
"step": 345
},
{
"epoch": 2.0710059171597632,
"grad_norm": 0.08513494025885673,
"learning_rate": 1.1134313831523547e-05,
"loss": 0.6783,
"step": 350
},
{
"epoch": 2.100591715976331,
"grad_norm": 0.08526343643130264,
"learning_rate": 1.0877276651112662e-05,
"loss": 0.683,
"step": 355
},
{
"epoch": 2.1301775147928996,
"grad_norm": 0.08415048962920803,
"learning_rate": 1.0619653946285948e-05,
"loss": 0.6699,
"step": 360
},
{
"epoch": 2.1597633136094676,
"grad_norm": 0.08993894149758148,
"learning_rate": 1.0361617663240253e-05,
"loss": 0.6616,
"step": 365
},
{
"epoch": 2.1893491124260356,
"grad_norm": 0.08779637319471659,
"learning_rate": 1.0103340024208674e-05,
"loss": 0.6681,
"step": 370
},
{
"epoch": 2.2189349112426036,
"grad_norm": 0.07716142615588464,
"learning_rate": 9.844993412513533e-06,
"loss": 0.6847,
"step": 375
},
{
"epoch": 2.2485207100591715,
"grad_norm": 0.09680115255291638,
"learning_rate": 9.586750257511868e-06,
"loss": 0.6748,
"step": 380
},
{
"epoch": 2.2781065088757395,
"grad_norm": 0.08727553994241195,
"learning_rate": 9.328782919510186e-06,
"loss": 0.6727,
"step": 385
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.08569274603060249,
"learning_rate": 9.0712635747253e-06,
"loss": 0.6722,
"step": 390
},
{
"epoch": 2.337278106508876,
"grad_norm": 0.08838223625297882,
"learning_rate": 8.81436410036804e-06,
"loss": 0.6792,
"step": 395
},
{
"epoch": 2.366863905325444,
"grad_norm": 0.08243129604747186,
"learning_rate": 8.558255959926533e-06,
"loss": 0.6775,
"step": 400
},
{
"epoch": 2.366863905325444,
"eval_loss": 0.7467715740203857,
"eval_runtime": 1.954,
"eval_samples_per_second": 65.506,
"eval_steps_per_second": 2.047,
"step": 400
},
{
"epoch": 2.396449704142012,
"grad_norm": 0.08372814834591441,
"learning_rate": 8.30311008872561e-06,
"loss": 0.6867,
"step": 405
},
{
"epoch": 2.42603550295858,
"grad_norm": 0.08520010748643847,
"learning_rate": 8.04909677983872e-06,
"loss": 0.677,
"step": 410
},
{
"epoch": 2.455621301775148,
"grad_norm": 0.08436113404879492,
"learning_rate": 7.796385570428527e-06,
"loss": 0.6811,
"step": 415
},
{
"epoch": 2.485207100591716,
"grad_norm": 0.07770487004133524,
"learning_rate": 7.545145128592009e-06,
"loss": 0.6775,
"step": 420
},
{
"epoch": 2.5147928994082838,
"grad_norm": 0.07873507288955481,
"learning_rate": 7.295543140785604e-06,
"loss": 0.6607,
"step": 425
},
{
"epoch": 2.544378698224852,
"grad_norm": 0.08216416278696723,
"learning_rate": 7.0477461999055365e-06,
"loss": 0.6721,
"step": 430
},
{
"epoch": 2.57396449704142,
"grad_norm": 0.07808001669903655,
"learning_rate": 6.801919694098034e-06,
"loss": 0.669,
"step": 435
},
{
"epoch": 2.603550295857988,
"grad_norm": 0.07827371958864436,
"learning_rate": 6.558227696373617e-06,
"loss": 0.6737,
"step": 440
},
{
"epoch": 2.633136094674556,
"grad_norm": 0.09029552025645185,
"learning_rate": 6.316832855099173e-06,
"loss": 0.68,
"step": 445
},
{
"epoch": 2.662721893491124,
"grad_norm": 0.08627523522424184,
"learning_rate": 6.077896285440874e-06,
"loss": 0.6781,
"step": 450
},
{
"epoch": 2.6923076923076925,
"grad_norm": 0.0944458646122902,
"learning_rate": 5.841577461830408e-06,
"loss": 0.6856,
"step": 455
},
{
"epoch": 2.7218934911242605,
"grad_norm": 0.08627859291249917,
"learning_rate": 5.608034111526298e-06,
"loss": 0.6828,
"step": 460
},
{
"epoch": 2.7514792899408285,
"grad_norm": 0.07636613357565422,
"learning_rate": 5.377422109341332e-06,
"loss": 0.6631,
"step": 465
},
{
"epoch": 2.7810650887573964,
"grad_norm": 0.07763420602167485,
"learning_rate": 5.149895373606405e-06,
"loss": 0.6706,
"step": 470
},
{
"epoch": 2.8106508875739644,
"grad_norm": 0.0797684563299413,
"learning_rate": 4.92560576344013e-06,
"loss": 0.6726,
"step": 475
},
{
"epoch": 2.8402366863905324,
"grad_norm": 0.0743258532275492,
"learning_rate": 4.704702977392914e-06,
"loss": 0.6681,
"step": 480
},
{
"epoch": 2.8698224852071004,
"grad_norm": 0.08260380529165902,
"learning_rate": 4.487334453532998e-06,
"loss": 0.6732,
"step": 485
},
{
"epoch": 2.899408284023669,
"grad_norm": 0.07603345763322027,
"learning_rate": 4.2736452710412645e-06,
"loss": 0.6759,
"step": 490
},
{
"epoch": 2.9289940828402368,
"grad_norm": 0.0788588593958065,
"learning_rate": 4.063778053380446e-06,
"loss": 0.6669,
"step": 495
},
{
"epoch": 2.9585798816568047,
"grad_norm": 0.0765652404714636,
"learning_rate": 3.857872873103322e-06,
"loss": 0.6694,
"step": 500
},
{
"epoch": 2.9585798816568047,
"eval_loss": 0.7420404553413391,
"eval_runtime": 1.949,
"eval_samples_per_second": 65.673,
"eval_steps_per_second": 2.052,
"step": 500
},
{
"epoch": 2.9881656804733727,
"grad_norm": 0.07687126382252668,
"learning_rate": 3.6560671583635467e-06,
"loss": 0.6723,
"step": 505
},
{
"epoch": 3.0177514792899407,
"grad_norm": 0.07912130181111991,
"learning_rate": 3.4584956011913693e-06,
"loss": 0.6602,
"step": 510
},
{
"epoch": 3.0473372781065087,
"grad_norm": 0.07360557876355839,
"learning_rate": 3.2652900675956e-06,
"loss": 0.6566,
"step": 515
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.0787952110142718,
"learning_rate": 3.0765795095517026e-06,
"loss": 0.6599,
"step": 520
},
{
"epoch": 3.106508875739645,
"grad_norm": 0.07557678307758718,
"learning_rate": 2.8924898789348645e-06,
"loss": 0.6659,
"step": 525
},
{
"epoch": 3.136094674556213,
"grad_norm": 0.07175247734719413,
"learning_rate": 2.713144043455388e-06,
"loss": 0.6646,
"step": 530
},
{
"epoch": 3.165680473372781,
"grad_norm": 0.07747698601868783,
"learning_rate": 2.538661704652595e-06,
"loss": 0.6605,
"step": 535
},
{
"epoch": 3.195266272189349,
"grad_norm": 0.0718076928664842,
"learning_rate": 2.369159318001937e-06,
"loss": 0.6543,
"step": 540
},
{
"epoch": 3.224852071005917,
"grad_norm": 0.07533978001859822,
"learning_rate": 2.2047500151886047e-06,
"loss": 0.6531,
"step": 545
},
{
"epoch": 3.2544378698224854,
"grad_norm": 0.07485842542784214,
"learning_rate": 2.045543528599607e-06,
"loss": 0.6646,
"step": 550
},
{
"epoch": 3.2840236686390534,
"grad_norm": 0.0708662804937619,
"learning_rate": 1.8916461180845968e-06,
"loss": 0.648,
"step": 555
},
{
"epoch": 3.3136094674556213,
"grad_norm": 0.0757464412979693,
"learning_rate": 1.743160500034443e-06,
"loss": 0.6395,
"step": 560
},
{
"epoch": 3.3431952662721893,
"grad_norm": 0.07734435238783051,
"learning_rate": 1.6001857788247755e-06,
"loss": 0.6495,
"step": 565
},
{
"epoch": 3.3727810650887573,
"grad_norm": 0.0725938998129201,
"learning_rate": 1.4628173806703594e-06,
"loss": 0.6552,
"step": 570
},
{
"epoch": 3.4023668639053253,
"grad_norm": 0.07170585944252475,
"learning_rate": 1.3311469899343698e-06,
"loss": 0.6556,
"step": 575
},
{
"epoch": 3.4319526627218933,
"grad_norm": 0.07035425662199751,
"learning_rate": 1.2052624879351105e-06,
"loss": 0.6514,
"step": 580
},
{
"epoch": 3.4615384615384617,
"grad_norm": 0.0716406331513175,
"learning_rate": 1.0852478942910228e-06,
"loss": 0.6556,
"step": 585
},
{
"epoch": 3.4911242603550297,
"grad_norm": 0.07161356638036934,
"learning_rate": 9.711833108431234e-07,
"loss": 0.6501,
"step": 590
},
{
"epoch": 3.5207100591715976,
"grad_norm": 0.0707175122399628,
"learning_rate": 8.631448681922994e-07,
"loss": 0.6607,
"step": 595
},
{
"epoch": 3.5502958579881656,
"grad_norm": 0.07021197999680215,
"learning_rate": 7.612046748871327e-07,
"loss": 0.6649,
"step": 600
},
{
"epoch": 3.5502958579881656,
"eval_loss": 0.7433957457542419,
"eval_runtime": 1.9404,
"eval_samples_per_second": 65.967,
"eval_steps_per_second": 2.061,
"step": 600
},
{
"epoch": 3.5798816568047336,
"grad_norm": 0.06889588969409026,
"learning_rate": 6.65430769296207e-07,
"loss": 0.6427,
"step": 605
},
{
"epoch": 3.609467455621302,
"grad_norm": 0.07042732460501529,
"learning_rate": 5.758870741969635e-07,
"loss": 0.6524,
"step": 610
},
{
"epoch": 3.63905325443787,
"grad_norm": 0.06830708719143527,
"learning_rate": 4.926333541114558e-07,
"loss": 0.6604,
"step": 615
},
{
"epoch": 3.668639053254438,
"grad_norm": 0.07062137013338904,
"learning_rate": 4.1572517541747294e-07,
"loss": 0.6541,
"step": 620
},
{
"epoch": 3.698224852071006,
"grad_norm": 0.06967910127337004,
"learning_rate": 3.4521386926163134e-07,
"loss": 0.6491,
"step": 625
},
{
"epoch": 3.727810650887574,
"grad_norm": 0.06931589660694865,
"learning_rate": 2.811464972992195e-07,
"loss": 0.6444,
"step": 630
},
{
"epoch": 3.757396449704142,
"grad_norm": 0.06857941794190696,
"learning_rate": 2.2356582028363548e-07,
"loss": 0.652,
"step": 635
},
{
"epoch": 3.78698224852071,
"grad_norm": 0.07146304965594258,
"learning_rate": 1.7251026952640583e-07,
"loss": 0.6547,
"step": 640
},
{
"epoch": 3.8165680473372783,
"grad_norm": 0.06914829105737026,
"learning_rate": 1.2801392124681233e-07,
"loss": 0.6419,
"step": 645
},
{
"epoch": 3.8461538461538463,
"grad_norm": 0.06976851389737697,
"learning_rate": 9.010647382825421e-08,
"loss": 0.6559,
"step": 650
},
{
"epoch": 3.8757396449704142,
"grad_norm": 0.06949483068766184,
"learning_rate": 5.881322799653699e-08,
"loss": 0.6559,
"step": 655
},
{
"epoch": 3.905325443786982,
"grad_norm": 0.06853851606881345,
"learning_rate": 3.4155069933301535e-08,
"loss": 0.6515,
"step": 660
},
{
"epoch": 3.93491124260355,
"grad_norm": 0.07221419070779167,
"learning_rate": 1.6148457335876112e-08,
"loss": 0.646,
"step": 665
},
{
"epoch": 3.9644970414201186,
"grad_norm": 0.06942961780673317,
"learning_rate": 4.80540843283972e-09,
"loss": 0.6653,
"step": 670
},
{
"epoch": 3.994082840236686,
"grad_norm": 0.0680541699351626,
"learning_rate": 1.3349396265516235e-10,
"loss": 0.6561,
"step": 675
},
{
"epoch": 4.0,
"step": 676,
"total_flos": 308009150447616.0,
"train_loss": 0.7185798888728463,
"train_runtime": 4924.3182,
"train_samples_per_second": 17.554,
"train_steps_per_second": 0.137
}
],
"logging_steps": 5,
"max_steps": 676,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 308009150447616.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}