{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.0,
"eval_steps": 500,
"global_step": 3426,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.035056967572304996,
"grad_norm": 12.451324462890625,
"learning_rate": 1.8446601941747574e-06,
"loss": 3.237,
"step": 20
},
{
"epoch": 0.07011393514460999,
"grad_norm": 4.131791114807129,
"learning_rate": 3.7864077669902915e-06,
"loss": 2.5266,
"step": 40
},
{
"epoch": 0.10517090271691498,
"grad_norm": 2.2451958656311035,
"learning_rate": 5.728155339805825e-06,
"loss": 1.8755,
"step": 60
},
{
"epoch": 0.14022787028921999,
"grad_norm": 1.4421552419662476,
"learning_rate": 7.66990291262136e-06,
"loss": 1.3649,
"step": 80
},
{
"epoch": 0.175284837861525,
"grad_norm": 0.9530179500579834,
"learning_rate": 9.611650485436894e-06,
"loss": 1.0674,
"step": 100
},
{
"epoch": 0.21034180543382996,
"grad_norm": 0.7194477319717407,
"learning_rate": 9.99942798060303e-06,
"loss": 0.9241,
"step": 120
},
{
"epoch": 0.24539877300613497,
"grad_norm": 0.6556061506271362,
"learning_rate": 9.997104376116195e-06,
"loss": 0.8575,
"step": 140
},
{
"epoch": 0.28045574057843997,
"grad_norm": 0.5718048810958862,
"learning_rate": 9.992994265395959e-06,
"loss": 0.829,
"step": 160
},
{
"epoch": 0.31551270815074495,
"grad_norm": 0.4922148287296295,
"learning_rate": 9.987099117840969e-06,
"loss": 0.8034,
"step": 180
},
{
"epoch": 0.35056967572305,
"grad_norm": 0.47302234172821045,
"learning_rate": 9.979421041015336e-06,
"loss": 0.7839,
"step": 200
},
{
"epoch": 0.38562664329535495,
"grad_norm": 0.49009189009666443,
"learning_rate": 9.969962779895172e-06,
"loss": 0.768,
"step": 220
},
{
"epoch": 0.42068361086765993,
"grad_norm": 0.4963654577732086,
"learning_rate": 9.958727715887218e-06,
"loss": 0.7628,
"step": 240
},
{
"epoch": 0.45574057843996496,
"grad_norm": 0.5206854343414307,
"learning_rate": 9.94571986561998e-06,
"loss": 0.7488,
"step": 260
},
{
"epoch": 0.49079754601226994,
"grad_norm": 0.48924869298934937,
"learning_rate": 9.930943879507748e-06,
"loss": 0.7436,
"step": 280
},
{
"epoch": 0.5258545135845749,
"grad_norm": 0.43540337681770325,
"learning_rate": 9.914405040088026e-06,
"loss": 0.7375,
"step": 300
},
{
"epoch": 0.5609114811568799,
"grad_norm": 0.44258421659469604,
"learning_rate": 9.896109260132993e-06,
"loss": 0.7277,
"step": 320
},
{
"epoch": 0.595968448729185,
"grad_norm": 0.4955386519432068,
"learning_rate": 9.876063080535627e-06,
"loss": 0.7284,
"step": 340
},
{
"epoch": 0.6310254163014899,
"grad_norm": 0.5027541518211365,
"learning_rate": 9.85427366797129e-06,
"loss": 0.7231,
"step": 360
},
{
"epoch": 0.6660823838737949,
"grad_norm": 0.4675957262516022,
"learning_rate": 9.830748812335576e-06,
"loss": 0.7212,
"step": 380
},
{
"epoch": 0.7011393514461,
"grad_norm": 0.4283595383167267,
"learning_rate": 9.805496923959363e-06,
"loss": 0.7164,
"step": 400
},
{
"epoch": 0.7361963190184049,
"grad_norm": 0.452084481716156,
"learning_rate": 9.778527030602049e-06,
"loss": 0.711,
"step": 420
},
{
"epoch": 0.7712532865907099,
"grad_norm": 0.4737929105758667,
"learning_rate": 9.74984877422405e-06,
"loss": 0.7084,
"step": 440
},
{
"epoch": 0.8063102541630149,
"grad_norm": 0.4964485466480255,
"learning_rate": 9.719472407539725e-06,
"loss": 0.7028,
"step": 460
},
{
"epoch": 0.8413672217353199,
"grad_norm": 0.44363030791282654,
"learning_rate": 9.68740879035194e-06,
"loss": 0.7045,
"step": 480
},
{
"epoch": 0.8764241893076249,
"grad_norm": 0.5004998445510864,
"learning_rate": 9.6536693856696e-06,
"loss": 0.6937,
"step": 500
},
{
"epoch": 0.9114811568799299,
"grad_norm": 0.4564264118671417,
"learning_rate": 9.618266255609533e-06,
"loss": 0.699,
"step": 520
},
{
"epoch": 0.9465381244522348,
"grad_norm": 0.4558616280555725,
"learning_rate": 9.58121205708418e-06,
"loss": 0.691,
"step": 540
},
{
"epoch": 0.9815950920245399,
"grad_norm": 0.413114458322525,
"learning_rate": 9.542520037276636e-06,
"loss": 0.6891,
"step": 560
},
{
"epoch": 1.0157756354075373,
"grad_norm": 0.403679758310318,
"learning_rate": 9.502204028904687e-06,
"loss": 0.6812,
"step": 580
},
{
"epoch": 1.0508326029798423,
"grad_norm": 0.40308722853660583,
"learning_rate": 9.46027844527549e-06,
"loss": 0.6791,
"step": 600
},
{
"epoch": 1.0858895705521472,
"grad_norm": 0.4085083603858948,
"learning_rate": 9.416758275132693e-06,
"loss": 0.6803,
"step": 620
},
{
"epoch": 1.1209465381244523,
"grad_norm": 0.4475920796394348,
"learning_rate": 9.371659077297843e-06,
"loss": 0.6789,
"step": 640
},
{
"epoch": 1.1560035056967572,
"grad_norm": 0.4604188799858093,
"learning_rate": 9.324996975107978e-06,
"loss": 0.674,
"step": 660
},
{
"epoch": 1.1910604732690622,
"grad_norm": 0.4190482795238495,
"learning_rate": 9.276788650651392e-06,
"loss": 0.6746,
"step": 680
},
{
"epoch": 1.2261174408413673,
"grad_norm": 0.420953631401062,
"learning_rate": 9.227051338803656e-06,
"loss": 0.6692,
"step": 700
},
{
"epoch": 1.2611744084136722,
"grad_norm": 0.4463854432106018,
"learning_rate": 9.175802821066009e-06,
"loss": 0.6737,
"step": 720
},
{
"epoch": 1.2962313759859772,
"grad_norm": 0.44004735350608826,
"learning_rate": 9.12306141920832e-06,
"loss": 0.6673,
"step": 740
},
{
"epoch": 1.331288343558282,
"grad_norm": 0.42015475034713745,
"learning_rate": 9.068845988718906e-06,
"loss": 0.6676,
"step": 760
},
{
"epoch": 1.3663453111305872,
"grad_norm": 0.43683475255966187,
"learning_rate": 9.013175912063534e-06,
"loss": 0.6649,
"step": 780
},
{
"epoch": 1.4014022787028921,
"grad_norm": 0.4281805753707886,
"learning_rate": 8.956071091756036e-06,
"loss": 0.6658,
"step": 800
},
{
"epoch": 1.4364592462751973,
"grad_norm": 0.4270734190940857,
"learning_rate": 8.89755194324299e-06,
"loss": 0.6646,
"step": 820
},
{
"epoch": 1.4715162138475022,
"grad_norm": 0.4163481593132019,
"learning_rate": 8.837639387605031e-06,
"loss": 0.6658,
"step": 840
},
{
"epoch": 1.5065731814198071,
"grad_norm": 0.45280900597572327,
"learning_rate": 8.776354844077389e-06,
"loss": 0.6592,
"step": 860
},
{
"epoch": 1.541630148992112,
"grad_norm": 0.40485361218452454,
"learning_rate": 8.713720222392338e-06,
"loss": 0.6579,
"step": 880
},
{
"epoch": 1.5766871165644172,
"grad_norm": 0.42039763927459717,
"learning_rate": 8.649757914946284e-06,
"loss": 0.6616,
"step": 900
},
{
"epoch": 1.6117440841367223,
"grad_norm": 0.4760454595088959,
"learning_rate": 8.584490788794296e-06,
"loss": 0.6572,
"step": 920
},
{
"epoch": 1.6468010517090272,
"grad_norm": 0.43802690505981445,
"learning_rate": 8.517942177474943e-06,
"loss": 0.6548,
"step": 940
},
{
"epoch": 1.6818580192813322,
"grad_norm": 0.5002708435058594,
"learning_rate": 8.450135872668369e-06,
"loss": 0.6557,
"step": 960
},
{
"epoch": 1.716914986853637,
"grad_norm": 0.4160609543323517,
"learning_rate": 8.38109611569056e-06,
"loss": 0.6529,
"step": 980
},
{
"epoch": 1.751971954425942,
"grad_norm": 0.43179649114608765,
"learning_rate": 8.310847588826876e-06,
"loss": 0.6529,
"step": 1000
},
{
"epoch": 1.7870289219982471,
"grad_norm": 0.4322780668735504,
"learning_rate": 8.239415406507934e-06,
"loss": 0.6535,
"step": 1020
},
{
"epoch": 1.8220858895705523,
"grad_norm": 0.4642186462879181,
"learning_rate": 8.166825106330985e-06,
"loss": 0.649,
"step": 1040
},
{
"epoch": 1.8571428571428572,
"grad_norm": 0.42697349190711975,
"learning_rate": 8.093102639930013e-06,
"loss": 0.65,
"step": 1060
},
{
"epoch": 1.8921998247151621,
"grad_norm": 0.4486387372016907,
"learning_rate": 8.01827436369781e-06,
"loss": 0.6492,
"step": 1080
},
{
"epoch": 1.927256792287467,
"grad_norm": 0.42962825298309326,
"learning_rate": 7.942367029363351e-06,
"loss": 0.6518,
"step": 1100
},
{
"epoch": 1.962313759859772,
"grad_norm": 0.43645408749580383,
"learning_rate": 7.865407774427828e-06,
"loss": 0.6475,
"step": 1120
},
{
"epoch": 1.997370727432077,
"grad_norm": 0.4662039875984192,
"learning_rate": 7.787424112462758e-06,
"loss": 0.649,
"step": 1140
},
{
"epoch": 2.0315512708150747,
"grad_norm": 0.44152551889419556,
"learning_rate": 7.708443923273671e-06,
"loss": 0.6401,
"step": 1160
},
{
"epoch": 2.0666082383873796,
"grad_norm": 0.39342495799064636,
"learning_rate": 7.628495442932838e-06,
"loss": 0.6386,
"step": 1180
},
{
"epoch": 2.1016652059596845,
"grad_norm": 0.4356766641139984,
"learning_rate": 7.54760725368464e-06,
"loss": 0.6391,
"step": 1200
},
{
"epoch": 2.1367221735319895,
"grad_norm": 0.3897708058357239,
"learning_rate": 7.465808273727182e-06,
"loss": 0.6383,
"step": 1220
},
{
"epoch": 2.1717791411042944,
"grad_norm": 0.4168529510498047,
"learning_rate": 7.383127746873796e-06,
"loss": 0.6361,
"step": 1240
},
{
"epoch": 2.2068361086765993,
"grad_norm": 0.39462465047836304,
"learning_rate": 7.2995952320981356e-06,
"loss": 0.6371,
"step": 1260
},
{
"epoch": 2.2418930762489047,
"grad_norm": 0.42870041728019714,
"learning_rate": 7.215240592966603e-06,
"loss": 0.6318,
"step": 1280
},
{
"epoch": 2.2769500438212096,
"grad_norm": 0.46848800778388977,
"learning_rate": 7.130093986961868e-06,
"loss": 0.633,
"step": 1300
},
{
"epoch": 2.3120070113935145,
"grad_norm": 0.415912002325058,
"learning_rate": 7.044185854701321e-06,
"loss": 0.6367,
"step": 1320
},
{
"epoch": 2.3470639789658194,
"grad_norm": 0.4347931444644928,
"learning_rate": 6.957546909054304e-06,
"loss": 0.6374,
"step": 1340
},
{
"epoch": 2.3821209465381243,
"grad_norm": 0.4282444417476654,
"learning_rate": 6.870208124161998e-06,
"loss": 0.6353,
"step": 1360
},
{
"epoch": 2.4171779141104293,
"grad_norm": 0.43224233388900757,
"learning_rate": 6.78220072436392e-06,
"loss": 0.6348,
"step": 1380
},
{
"epoch": 2.4522348816827346,
"grad_norm": 0.4176190495491028,
"learning_rate": 6.693556173034953e-06,
"loss": 0.633,
"step": 1400
},
{
"epoch": 2.4872918492550395,
"grad_norm": 0.4181615710258484,
"learning_rate": 6.6043061613369356e-06,
"loss": 0.6324,
"step": 1420
},
{
"epoch": 2.5223488168273445,
"grad_norm": 0.38148173689842224,
"learning_rate": 6.514482596888807e-06,
"loss": 0.6301,
"step": 1440
},
{
"epoch": 2.5574057843996494,
"grad_norm": 0.41031816601753235,
"learning_rate": 6.424117592359367e-06,
"loss": 0.6332,
"step": 1460
},
{
"epoch": 2.5924627519719543,
"grad_norm": 0.4005562961101532,
"learning_rate": 6.333243453986734e-06,
"loss": 0.6265,
"step": 1480
},
{
"epoch": 2.6275197195442592,
"grad_norm": 0.3940238058567047,
"learning_rate": 6.241892670028595e-06,
"loss": 0.6315,
"step": 1500
},
{
"epoch": 2.662576687116564,
"grad_norm": 0.4001730680465698,
"learning_rate": 6.150097899147384e-06,
"loss": 0.6299,
"step": 1520
},
{
"epoch": 2.6976336546888695,
"grad_norm": 0.3857872188091278,
"learning_rate": 6.057891958734538e-06,
"loss": 0.6304,
"step": 1540
},
{
"epoch": 2.7326906222611744,
"grad_norm": 0.4169263243675232,
"learning_rate": 5.965307813178015e-06,
"loss": 0.6315,
"step": 1560
},
{
"epoch": 2.7677475898334793,
"grad_norm": 0.4010975658893585,
"learning_rate": 5.872378562077241e-06,
"loss": 0.6297,
"step": 1580
},
{
"epoch": 2.8028045574057843,
"grad_norm": 0.4302142262458801,
"learning_rate": 5.779137428409738e-06,
"loss": 0.6302,
"step": 1600
},
{
"epoch": 2.8378615249780896,
"grad_norm": 0.39000585675239563,
"learning_rate": 5.685617746653629e-06,
"loss": 0.6312,
"step": 1620
},
{
"epoch": 2.8729184925503946,
"grad_norm": 0.4292212724685669,
"learning_rate": 5.591852950870287e-06,
"loss": 0.6312,
"step": 1640
},
{
"epoch": 2.9079754601226995,
"grad_norm": 0.3838886320590973,
"learning_rate": 5.497876562751384e-06,
"loss": 0.6302,
"step": 1660
},
{
"epoch": 2.9430324276950044,
"grad_norm": 0.36835259199142456,
"learning_rate": 5.403722179634602e-06,
"loss": 0.6292,
"step": 1680
},
{
"epoch": 2.9780893952673093,
"grad_norm": 0.3884848654270172,
"learning_rate": 5.309423462492314e-06,
"loss": 0.6261,
"step": 1700
},
{
"epoch": 3.0122699386503067,
"grad_norm": 0.3762246072292328,
"learning_rate": 5.215014123897504e-06,
"loss": 0.6202,
"step": 1720
},
{
"epoch": 3.0473269062226116,
"grad_norm": 0.38138872385025024,
"learning_rate": 5.120527915971235e-06,
"loss": 0.6205,
"step": 1740
},
{
"epoch": 3.0823838737949165,
"grad_norm": 0.38698920607566833,
"learning_rate": 5.0259986183160006e-06,
"loss": 0.6186,
"step": 1760
},
{
"epoch": 3.117440841367222,
"grad_norm": 0.378830224275589,
"learning_rate": 4.931460025939226e-06,
"loss": 0.6214,
"step": 1780
},
{
"epoch": 3.152497808939527,
"grad_norm": 0.3751004934310913,
"learning_rate": 4.836945937171279e-06,
"loss": 0.6209,
"step": 1800
},
{
"epoch": 3.1875547765118317,
"grad_norm": 0.3829745054244995,
"learning_rate": 4.742490141582279e-06,
"loss": 0.6213,
"step": 1820
},
{
"epoch": 3.2226117440841366,
"grad_norm": 0.4228389859199524,
"learning_rate": 4.648126407902058e-06,
"loss": 0.6193,
"step": 1840
},
{
"epoch": 3.2576687116564416,
"grad_norm": 0.367960125207901,
"learning_rate": 4.553888471947546e-06,
"loss": 0.6198,
"step": 1860
},
{
"epoch": 3.292725679228747,
"grad_norm": 0.39815646409988403,
"learning_rate": 4.4598100245619505e-06,
"loss": 0.6187,
"step": 1880
},
{
"epoch": 3.327782646801052,
"grad_norm": 0.3625248074531555,
"learning_rate": 4.3659246995699845e-06,
"loss": 0.6176,
"step": 1900
},
{
"epoch": 3.3628396143733568,
"grad_norm": 0.37671083211898804,
"learning_rate": 4.2722660617535105e-06,
"loss": 0.6182,
"step": 1920
},
{
"epoch": 3.3978965819456617,
"grad_norm": 0.3727245032787323,
"learning_rate": 4.178867594851849e-06,
"loss": 0.616,
"step": 1940
},
{
"epoch": 3.4329535495179666,
"grad_norm": 0.361914724111557,
"learning_rate": 4.085762689591054e-06,
"loss": 0.6157,
"step": 1960
},
{
"epoch": 3.4680105170902715,
"grad_norm": 0.3587988018989563,
"learning_rate": 3.992984631746469e-06,
"loss": 0.6188,
"step": 1980
},
{
"epoch": 3.5030674846625764,
"grad_norm": 0.3729381561279297,
"learning_rate": 3.9005665902427695e-06,
"loss": 0.6208,
"step": 2000
},
{
"epoch": 3.538124452234882,
"grad_norm": 0.35941800475120544,
"learning_rate": 3.8085416052958107e-06,
"loss": 0.6183,
"step": 2020
},
{
"epoch": 3.5731814198071867,
"grad_norm": 0.35061511397361755,
"learning_rate": 3.7169425766004653e-06,
"loss": 0.6148,
"step": 2040
},
{
"epoch": 3.6082383873794917,
"grad_norm": 0.36660343408584595,
"learning_rate": 3.6258022515687215e-06,
"loss": 0.6186,
"step": 2060
},
{
"epoch": 3.6432953549517966,
"grad_norm": 0.4035237729549408,
"learning_rate": 3.5351532136222012e-06,
"loss": 0.616,
"step": 2080
},
{
"epoch": 3.678352322524102,
"grad_norm": 0.3646794855594635,
"learning_rate": 3.445027870543323e-06,
"loss": 0.6145,
"step": 2100
},
{
"epoch": 3.713409290096407,
"grad_norm": 0.38012927770614624,
"learning_rate": 3.3554584428892488e-06,
"loss": 0.6181,
"step": 2120
},
{
"epoch": 3.7484662576687118,
"grad_norm": 0.36509743332862854,
"learning_rate": 3.2664769524727712e-06,
"loss": 0.616,
"step": 2140
},
{
"epoch": 3.7835232252410167,
"grad_norm": 0.38520926237106323,
"learning_rate": 3.178115210914242e-06,
"loss": 0.6169,
"step": 2160
},
{
"epoch": 3.8185801928133216,
"grad_norm": 0.3496517837047577,
"learning_rate": 3.0904048082686655e-06,
"loss": 0.6158,
"step": 2180
},
{
"epoch": 3.8536371603856265,
"grad_norm": 0.37065935134887695,
"learning_rate": 3.00337710173198e-06,
"loss": 0.6162,
"step": 2200
},
{
"epoch": 3.8886941279579315,
"grad_norm": 0.339855819940567,
"learning_rate": 2.9170632044306137e-06,
"loss": 0.6156,
"step": 2220
},
{
"epoch": 3.9237510955302364,
"grad_norm": 0.3591175675392151,
"learning_rate": 2.8314939742982673e-06,
"loss": 0.6144,
"step": 2240
},
{
"epoch": 3.9588080631025417,
"grad_norm": 0.3600142300128937,
"learning_rate": 2.746700003043964e-06,
"loss": 0.6195,
"step": 2260
},
{
"epoch": 3.9938650306748467,
"grad_norm": 0.3738831579685211,
"learning_rate": 2.662711605215248e-06,
"loss": 0.6135,
"step": 2280
},
{
"epoch": 4.028045574057844,
"grad_norm": 0.3281383514404297,
"learning_rate": 2.579558807360489e-06,
"loss": 0.6104,
"step": 2300
},
{
"epoch": 4.063102541630149,
"grad_norm": 0.3486866354942322,
"learning_rate": 2.4972713372941406e-06,
"loss": 0.6095,
"step": 2320
},
{
"epoch": 4.098159509202454,
"grad_norm": 0.3392680287361145,
"learning_rate": 2.4158786134687966e-06,
"loss": 0.6108,
"step": 2340
},
{
"epoch": 4.133216476774759,
"grad_norm": 0.3901905417442322,
"learning_rate": 2.3354097344578565e-06,
"loss": 0.6123,
"step": 2360
},
{
"epoch": 4.168273444347064,
"grad_norm": 0.33477798104286194,
"learning_rate": 2.25589346855254e-06,
"loss": 0.6079,
"step": 2380
},
{
"epoch": 4.203330411919369,
"grad_norm": 0.35764065384864807,
"learning_rate": 2.1773582434769854e-06,
"loss": 0.6066,
"step": 2400
},
{
"epoch": 4.238387379491674,
"grad_norm": 0.34379830956459045,
"learning_rate": 2.0998321362251036e-06,
"loss": 0.608,
"step": 2420
},
{
"epoch": 4.273444347063979,
"grad_norm": 0.3394622206687927,
"learning_rate": 2.023342863022819e-06,
"loss": 0.6142,
"step": 2440
},
{
"epoch": 4.308501314636284,
"grad_norm": 0.35136643052101135,
"learning_rate": 1.94791776941929e-06,
"loss": 0.6115,
"step": 2460
},
{
"epoch": 4.343558282208589,
"grad_norm": 0.3298383355140686,
"learning_rate": 1.873583820510647e-06,
"loss": 0.6089,
"step": 2480
},
{
"epoch": 4.378615249780894,
"grad_norm": 0.3390386402606964,
"learning_rate": 1.8003675912997487e-06,
"loss": 0.612,
"step": 2500
},
{
"epoch": 4.413672217353199,
"grad_norm": 0.3399540185928345,
"learning_rate": 1.7282952571953987e-06,
"loss": 0.6123,
"step": 2520
},
{
"epoch": 4.448729184925504,
"grad_norm": 0.3504091203212738,
"learning_rate": 1.657392584654412e-06,
"loss": 0.6078,
"step": 2540
},
{
"epoch": 4.483786152497809,
"grad_norm": 0.3221462070941925,
"learning_rate": 1.587684921969912e-06,
"loss": 0.6142,
"step": 2560
},
{
"epoch": 4.518843120070114,
"grad_norm": 0.34956008195877075,
"learning_rate": 1.5191971902090797e-06,
"loss": 0.6085,
"step": 2580
},
{
"epoch": 4.553900087642419,
"grad_norm": 0.34091153740882874,
"learning_rate": 1.4519538743036927e-06,
"loss": 0.6117,
"step": 2600
},
{
"epoch": 4.588957055214724,
"grad_norm": 0.34645262360572815,
"learning_rate": 1.385979014296533e-06,
"loss": 0.6076,
"step": 2620
},
{
"epoch": 4.624014022787029,
"grad_norm": 0.3348851799964905,
"learning_rate": 1.3212961967468985e-06,
"loss": 0.6116,
"step": 2640
},
{
"epoch": 4.659070990359334,
"grad_norm": 0.32889384031295776,
"learning_rate": 1.2579285462981855e-06,
"loss": 0.6129,
"step": 2660
},
{
"epoch": 4.694127957931639,
"grad_norm": 0.3461220860481262,
"learning_rate": 1.195898717410664e-06,
"loss": 0.6106,
"step": 2680
},
{
"epoch": 4.729184925503944,
"grad_norm": 0.319545716047287,
"learning_rate": 1.1352288862622968e-06,
"loss": 0.6068,
"step": 2700
},
{
"epoch": 4.764241893076249,
"grad_norm": 0.3272876739501953,
"learning_rate": 1.075940742820588e-06,
"loss": 0.6082,
"step": 2720
},
{
"epoch": 4.799298860648554,
"grad_norm": 0.3398887515068054,
"learning_rate": 1.0180554830882333e-06,
"loss": 0.6069,
"step": 2740
},
{
"epoch": 4.8343558282208585,
"grad_norm": 0.32064250111579895,
"learning_rate": 9.615938015253723e-07,
"loss": 0.6086,
"step": 2760
},
{
"epoch": 4.869412795793163,
"grad_norm": 0.32411012053489685,
"learning_rate": 9.065758836511556e-07,
"loss": 0.6083,
"step": 2780
},
{
"epoch": 4.904469763365469,
"grad_norm": 0.31640660762786865,
"learning_rate": 8.53021398827239e-07,
"loss": 0.6094,
"step": 2800
},
{
"epoch": 4.939526730937774,
"grad_norm": 0.3243345320224762,
"learning_rate": 8.009494932258427e-07,
"loss": 0.6104,
"step": 2820
},
{
"epoch": 4.974583698510079,
"grad_norm": 0.31760430335998535,
"learning_rate": 7.503787829848191e-07,
"loss": 0.6078,
"step": 2840
},
{
"epoch": 5.0087642418930765,
"grad_norm": 0.32336053252220154,
"learning_rate": 7.013273475522392e-07,
"loss": 0.609,
"step": 2860
},
{
"epoch": 5.043821209465381,
"grad_norm": 0.31958338618278503,
"learning_rate": 6.53812723222838e-07,
"loss": 0.6073,
"step": 2880
},
{
"epoch": 5.078878177037686,
"grad_norm": 0.319396436214447,
"learning_rate": 6.078518968686426e-07,
"loss": 0.6096,
"step": 2900
},
{
"epoch": 5.113935144609991,
"grad_norm": 0.33287620544433594,
"learning_rate": 5.634612998660249e-07,
"loss": 0.6011,
"step": 2920
},
{
"epoch": 5.148992112182296,
"grad_norm": 0.32154789566993713,
"learning_rate": 5.206568022213482e-07,
"loss": 0.6081,
"step": 2940
},
{
"epoch": 5.184049079754601,
"grad_norm": 0.3217925727367401,
"learning_rate": 4.794537068973093e-07,
"loss": 0.607,
"step": 2960
},
{
"epoch": 5.219106047326906,
"grad_norm": 0.3143027722835541,
"learning_rate": 4.398667443420029e-07,
"loss": 0.6065,
"step": 2980
},
{
"epoch": 5.254163014899211,
"grad_norm": 0.3175284266471863,
"learning_rate": 4.019100672226617e-07,
"loss": 0.6056,
"step": 3000
},
{
"epoch": 5.289219982471516,
"grad_norm": 0.32859277725219727,
"learning_rate": 3.65597245365964e-07,
"loss": 0.6063,
"step": 3020
},
{
"epoch": 5.324276950043822,
"grad_norm": 0.3200486898422241,
"learning_rate": 3.3094126090670477e-07,
"loss": 0.6074,
"step": 3040
},
{
"epoch": 5.3593339176161265,
"grad_norm": 0.3193705976009369,
"learning_rate": 2.9795450364657865e-07,
"loss": 0.607,
"step": 3060
},
{
"epoch": 5.3943908851884315,
"grad_norm": 0.32681772112846375,
"learning_rate": 2.6664876662471697e-07,
"loss": 0.6053,
"step": 3080
},
{
"epoch": 5.429447852760736,
"grad_norm": 0.31692323088645935,
"learning_rate": 2.370352419015892e-07,
"loss": 0.608,
"step": 3100
},
{
"epoch": 5.464504820333041,
"grad_norm": 0.32039353251457214,
"learning_rate": 2.091245165577349e-07,
"loss": 0.6079,
"step": 3120
},
{
"epoch": 5.499561787905346,
"grad_norm": 0.31619492173194885,
"learning_rate": 1.8292656890880722e-07,
"loss": 0.6045,
"step": 3140
},
{
"epoch": 5.534618755477651,
"grad_norm": 0.31279516220092773,
"learning_rate": 1.5845076493823331e-07,
"loss": 0.6066,
"step": 3160
},
{
"epoch": 5.569675723049956,
"grad_norm": 0.3182794451713562,
"learning_rate": 1.3570585494880328e-07,
"loss": 0.6085,
"step": 3180
},
{
"epoch": 5.604732690622261,
"grad_norm": 0.31176891922950745,
"learning_rate": 1.1469997043436154e-07,
"loss": 0.6048,
"step": 3200
},
{
"epoch": 5.639789658194566,
"grad_norm": 0.31051337718963623,
"learning_rate": 9.544062117273045e-08,
"loss": 0.607,
"step": 3220
},
{
"epoch": 5.674846625766871,
"grad_norm": 0.31119874119758606,
"learning_rate": 7.793469254090524e-08,
"loss": 0.6054,
"step": 3240
},
{
"epoch": 5.709903593339176,
"grad_norm": 0.3019949793815613,
"learning_rate": 6.218844305346916e-08,
"loss": 0.6074,
"step": 3260
},
{
"epoch": 5.7449605609114816,
"grad_norm": 0.30909818410873413,
"learning_rate": 4.820750212513048e-08,
"loss": 0.6085,
"step": 3280
},
{
"epoch": 5.7800175284837865,
"grad_norm": 0.30439695715904236,
"learning_rate": 3.599686805815128e-08,
"loss": 0.6101,
"step": 3300
},
{
"epoch": 5.815074496056091,
"grad_norm": 0.3216073215007782,
"learning_rate": 2.5560906255420737e-08,
"loss": 0.6092,
"step": 3320
},
{
"epoch": 5.850131463628396,
"grad_norm": 0.3213990330696106,
"learning_rate": 1.6903347659781856e-08,
"loss": 0.6071,
"step": 3340
},
{
"epoch": 5.885188431200701,
"grad_norm": 0.3153989613056183,
"learning_rate": 1.0027287420192322e-08,
"loss": 0.6074,
"step": 3360
},
{
"epoch": 5.920245398773006,
"grad_norm": 0.308703750371933,
"learning_rate": 4.935183785180209e-09,
"loss": 0.6058,
"step": 3380
},
{
"epoch": 5.955302366345311,
"grad_norm": 0.3108364939689636,
"learning_rate": 1.6288572240014123e-09,
"loss": 0.6038,
"step": 3400
},
{
"epoch": 5.990359333917616,
"grad_norm": 0.313579261302948,
"learning_rate": 1.0948977580638176e-10,
"loss": 0.6079,
"step": 3420
},
{
"epoch": 6.0,
"step": 3426,
"total_flos": 9.485754681343869e+18,
"train_loss": 0.6827397172519218,
"train_runtime": 47545.1809,
"train_samples_per_second": 4.607,
"train_steps_per_second": 0.072
}
],
"logging_steps": 20,
"max_steps": 3426,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.485754681343869e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}