{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 733,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0068212824010914054,
"grad_norm": 1.881799613195007,
"learning_rate": 5.405405405405406e-06,
"loss": 0.8521,
"num_tokens": 3759146.0,
"step": 5
},
{
"epoch": 0.013642564802182811,
"grad_norm": 1.7101008743112052,
"learning_rate": 1.2162162162162164e-05,
"loss": 0.8047,
"num_tokens": 7668808.0,
"step": 10
},
{
"epoch": 0.020463847203274217,
"grad_norm": 0.6865141351597414,
"learning_rate": 1.891891891891892e-05,
"loss": 0.7189,
"num_tokens": 11368873.0,
"step": 15
},
{
"epoch": 0.027285129604365622,
"grad_norm": 0.5605759414177258,
"learning_rate": 2.5675675675675675e-05,
"loss": 0.6773,
"num_tokens": 15118063.0,
"step": 20
},
{
"epoch": 0.034106412005457026,
"grad_norm": 0.48933370420808264,
"learning_rate": 3.2432432432432436e-05,
"loss": 0.6547,
"num_tokens": 18906839.0,
"step": 25
},
{
"epoch": 0.040927694406548434,
"grad_norm": 0.4114426768382651,
"learning_rate": 3.918918918918919e-05,
"loss": 0.6323,
"num_tokens": 22641755.0,
"step": 30
},
{
"epoch": 0.047748976807639835,
"grad_norm": 0.383721233103028,
"learning_rate": 4.594594594594595e-05,
"loss": 0.6278,
"num_tokens": 26636629.0,
"step": 35
},
{
"epoch": 0.054570259208731244,
"grad_norm": 0.4012112843562271,
"learning_rate": 4.999908316574644e-05,
"loss": 0.5974,
"num_tokens": 30417967.0,
"step": 40
},
{
"epoch": 0.061391541609822645,
"grad_norm": 0.3692014531581435,
"learning_rate": 4.998876963847189e-05,
"loss": 0.6003,
"num_tokens": 34231333.0,
"step": 45
},
{
"epoch": 0.06821282401091405,
"grad_norm": 0.3955585216188017,
"learning_rate": 4.996700181165029e-05,
"loss": 0.6003,
"num_tokens": 37961424.0,
"step": 50
},
{
"epoch": 0.07503410641200546,
"grad_norm": 0.4068234869096405,
"learning_rate": 4.993379077238036e-05,
"loss": 0.6008,
"num_tokens": 41826860.0,
"step": 55
},
{
"epoch": 0.08185538881309687,
"grad_norm": 0.4435585222122122,
"learning_rate": 4.9889153436180295e-05,
"loss": 0.5886,
"num_tokens": 45543403.0,
"step": 60
},
{
"epoch": 0.08867667121418826,
"grad_norm": 0.44550926213945213,
"learning_rate": 4.983311253837213e-05,
"loss": 0.5926,
"num_tokens": 49369486.0,
"step": 65
},
{
"epoch": 0.09549795361527967,
"grad_norm": 0.41100266811481995,
"learning_rate": 4.9765696622501846e-05,
"loss": 0.582,
"num_tokens": 53010874.0,
"step": 70
},
{
"epoch": 0.10231923601637108,
"grad_norm": 0.391153857076114,
"learning_rate": 4.968694002580118e-05,
"loss": 0.5841,
"num_tokens": 56909889.0,
"step": 75
},
{
"epoch": 0.10914051841746249,
"grad_norm": 0.4403959276876663,
"learning_rate": 4.959688286169851e-05,
"loss": 0.5676,
"num_tokens": 60650570.0,
"step": 80
},
{
"epoch": 0.11596180081855388,
"grad_norm": 0.4486997016217728,
"learning_rate": 4.9495570999387685e-05,
"loss": 0.5613,
"num_tokens": 64564660.0,
"step": 85
},
{
"epoch": 0.12278308321964529,
"grad_norm": 0.4400583628743631,
"learning_rate": 4.9383056040465276e-05,
"loss": 0.5794,
"num_tokens": 68426882.0,
"step": 90
},
{
"epoch": 0.1296043656207367,
"grad_norm": 0.39088631890644404,
"learning_rate": 4.925939529264815e-05,
"loss": 0.5751,
"num_tokens": 72252819.0,
"step": 95
},
{
"epoch": 0.1364256480218281,
"grad_norm": 0.39462317452965684,
"learning_rate": 4.9124651740584684e-05,
"loss": 0.5613,
"num_tokens": 76160914.0,
"step": 100
},
{
"epoch": 0.1432469304229195,
"grad_norm": 0.3854843360933772,
"learning_rate": 4.897889401377447e-05,
"loss": 0.5631,
"num_tokens": 80152955.0,
"step": 105
},
{
"epoch": 0.15006821282401092,
"grad_norm": 0.33367658431610214,
"learning_rate": 4.882219635161306e-05,
"loss": 0.5667,
"num_tokens": 83901702.0,
"step": 110
},
{
"epoch": 0.15688949522510232,
"grad_norm": 0.42482706184560826,
"learning_rate": 4.865463856557922e-05,
"loss": 0.5655,
"num_tokens": 87691018.0,
"step": 115
},
{
"epoch": 0.16371077762619374,
"grad_norm": 0.3842124056837664,
"learning_rate": 4.847630599858426e-05,
"loss": 0.5547,
"num_tokens": 91542428.0,
"step": 120
},
{
"epoch": 0.17053206002728513,
"grad_norm": 0.337517505531631,
"learning_rate": 4.8287289481503954e-05,
"loss": 0.5615,
"num_tokens": 95438170.0,
"step": 125
},
{
"epoch": 0.17735334242837653,
"grad_norm": 0.4029687538155939,
"learning_rate": 4.8087685286915276e-05,
"loss": 0.5572,
"num_tokens": 99383692.0,
"step": 130
},
{
"epoch": 0.18417462482946795,
"grad_norm": 0.40405586857695963,
"learning_rate": 4.787759508006147e-05,
"loss": 0.5568,
"num_tokens": 103223537.0,
"step": 135
},
{
"epoch": 0.19099590723055934,
"grad_norm": 0.4324587813631283,
"learning_rate": 4.765712586707048e-05,
"loss": 0.5694,
"num_tokens": 106901687.0,
"step": 140
},
{
"epoch": 0.19781718963165076,
"grad_norm": 0.4024036288115306,
"learning_rate": 4.7426389940453065e-05,
"loss": 0.542,
"num_tokens": 110840758.0,
"step": 145
},
{
"epoch": 0.20463847203274216,
"grad_norm": 0.42624775460173636,
"learning_rate": 4.718550482190837e-05,
"loss": 0.5579,
"num_tokens": 114521504.0,
"step": 150
},
{
"epoch": 0.21145975443383355,
"grad_norm": 0.3394520387215503,
"learning_rate": 4.6934593202466127e-05,
"loss": 0.5424,
"num_tokens": 118445759.0,
"step": 155
},
{
"epoch": 0.21828103683492497,
"grad_norm": 0.36684991077490176,
"learning_rate": 4.6673782879995896e-05,
"loss": 0.5511,
"num_tokens": 122311693.0,
"step": 160
},
{
"epoch": 0.22510231923601637,
"grad_norm": 0.3418183579126294,
"learning_rate": 4.640320669411526e-05,
"loss": 0.554,
"num_tokens": 126094524.0,
"step": 165
},
{
"epoch": 0.23192360163710776,
"grad_norm": 0.3573109791196806,
"learning_rate": 4.612300245853004e-05,
"loss": 0.5473,
"num_tokens": 129971056.0,
"step": 170
},
{
"epoch": 0.23874488403819918,
"grad_norm": 0.4404011696788091,
"learning_rate": 4.5833312890841085e-05,
"loss": 0.562,
"num_tokens": 133765982.0,
"step": 175
},
{
"epoch": 0.24556616643929058,
"grad_norm": 0.39333767306296735,
"learning_rate": 4.553428553985329e-05,
"loss": 0.5417,
"num_tokens": 137522281.0,
"step": 180
},
{
"epoch": 0.252387448840382,
"grad_norm": 0.3766363049697423,
"learning_rate": 4.522607271042399e-05,
"loss": 0.5366,
"num_tokens": 141196084.0,
"step": 185
},
{
"epoch": 0.2592087312414734,
"grad_norm": 0.4330332210434021,
"learning_rate": 4.490883138588882e-05,
"loss": 0.548,
"num_tokens": 145136704.0,
"step": 190
},
{
"epoch": 0.2660300136425648,
"grad_norm": 0.3130688257115034,
"learning_rate": 4.458272314810479e-05,
"loss": 0.5358,
"num_tokens": 148940122.0,
"step": 195
},
{
"epoch": 0.2728512960436562,
"grad_norm": 0.2958242457017384,
"learning_rate": 4.4247914095151086e-05,
"loss": 0.5456,
"num_tokens": 152809678.0,
"step": 200
},
{
"epoch": 0.27967257844474763,
"grad_norm": 0.3631423972213163,
"learning_rate": 4.390457475672966e-05,
"loss": 0.5393,
"num_tokens": 156683573.0,
"step": 205
},
{
"epoch": 0.286493860845839,
"grad_norm": 0.33299961683660967,
"learning_rate": 4.35528800073086e-05,
"loss": 0.5408,
"num_tokens": 160433326.0,
"step": 210
},
{
"epoch": 0.2933151432469304,
"grad_norm": 0.34487494790560014,
"learning_rate": 4.31930089770526e-05,
"loss": 0.5442,
"num_tokens": 164374316.0,
"step": 215
},
{
"epoch": 0.30013642564802184,
"grad_norm": 0.33594816899871066,
"learning_rate": 4.282514496058582e-05,
"loss": 0.5238,
"num_tokens": 168223299.0,
"step": 220
},
{
"epoch": 0.3069577080491132,
"grad_norm": 0.2901309516034236,
"learning_rate": 4.24494753236337e-05,
"loss": 0.5365,
"num_tokens": 172132000.0,
"step": 225
},
{
"epoch": 0.31377899045020463,
"grad_norm": 0.3239699981737236,
"learning_rate": 4.2066191407591125e-05,
"loss": 0.5321,
"num_tokens": 176086331.0,
"step": 230
},
{
"epoch": 0.32060027285129605,
"grad_norm": 0.29347190102008297,
"learning_rate": 4.1675488432065785e-05,
"loss": 0.5244,
"num_tokens": 179917640.0,
"step": 235
},
{
"epoch": 0.3274215552523875,
"grad_norm": 0.2896636007704082,
"learning_rate": 4.127756539544609e-05,
"loss": 0.537,
"num_tokens": 183746129.0,
"step": 240
},
{
"epoch": 0.33424283765347884,
"grad_norm": 0.37369409333673687,
"learning_rate": 4.087262497354452e-05,
"loss": 0.5454,
"num_tokens": 187699370.0,
"step": 245
},
{
"epoch": 0.34106412005457026,
"grad_norm": 0.30263051124642876,
"learning_rate": 4.046087341636789e-05,
"loss": 0.5279,
"num_tokens": 191512142.0,
"step": 250
},
{
"epoch": 0.3478854024556617,
"grad_norm": 0.30602907118420253,
"learning_rate": 4.0042520443067176e-05,
"loss": 0.5292,
"num_tokens": 195367749.0,
"step": 255
},
{
"epoch": 0.35470668485675305,
"grad_norm": 0.30856496307351244,
"learning_rate": 3.961777913512035e-05,
"loss": 0.5182,
"num_tokens": 199215371.0,
"step": 260
},
{
"epoch": 0.3615279672578445,
"grad_norm": 0.2991290787509346,
"learning_rate": 3.9186865827802724e-05,
"loss": 0.5378,
"num_tokens": 202903048.0,
"step": 265
},
{
"epoch": 0.3683492496589359,
"grad_norm": 0.3122122629436248,
"learning_rate": 3.875e-05,
"loss": 0.5266,
"num_tokens": 206761213.0,
"step": 270
},
{
"epoch": 0.37517053206002726,
"grad_norm": 0.2853089099996772,
"learning_rate": 3.830740416242014e-05,
"loss": 0.5225,
"num_tokens": 210585632.0,
"step": 275
},
{
"epoch": 0.3819918144611187,
"grad_norm": 0.30512356970359406,
"learning_rate": 3.7859303744261064e-05,
"loss": 0.5282,
"num_tokens": 214261738.0,
"step": 280
},
{
"epoch": 0.3888130968622101,
"grad_norm": 0.27496627808176893,
"learning_rate": 3.740592697839185e-05,
"loss": 0.533,
"num_tokens": 218144024.0,
"step": 285
},
{
"epoch": 0.3956343792633015,
"grad_norm": 0.2708720303111062,
"learning_rate": 3.694750478510596e-05,
"loss": 0.5285,
"num_tokens": 222057295.0,
"step": 290
},
{
"epoch": 0.4024556616643929,
"grad_norm": 0.2731317149721799,
"learning_rate": 3.648427065450555e-05,
"loss": 0.5198,
"num_tokens": 225828573.0,
"step": 295
},
{
"epoch": 0.4092769440654843,
"grad_norm": 0.2838811843668512,
"learning_rate": 3.601646052757707e-05,
"loss": 0.519,
"num_tokens": 229710487.0,
"step": 300
},
{
"epoch": 0.41609822646657574,
"grad_norm": 0.31150738136265943,
"learning_rate": 3.55443126760184e-05,
"loss": 0.5344,
"num_tokens": 3907038.0,
"step": 305
},
{
"epoch": 0.4229195088676671,
"grad_norm": 0.3468593061143695,
"learning_rate": 3.506806758087894e-05,
"loss": 0.532,
"num_tokens": 7683984.0,
"step": 310
},
{
"epoch": 0.4297407912687585,
"grad_norm": 0.31656776462890823,
"learning_rate": 3.458796781007437e-05,
"loss": 0.5267,
"num_tokens": 11403774.0,
"step": 315
},
{
"epoch": 0.43656207366984995,
"grad_norm": 0.2905862187240268,
"learning_rate": 3.410425789483854e-05,
"loss": 0.527,
"num_tokens": 15257500.0,
"step": 320
},
{
"epoch": 0.4433833560709413,
"grad_norm": 0.2862429453666647,
"learning_rate": 3.3617184205175304e-05,
"loss": 0.5335,
"num_tokens": 19040608.0,
"step": 325
},
{
"epoch": 0.45020463847203274,
"grad_norm": 0.34169112866025547,
"learning_rate": 3.312699482437392e-05,
"loss": 0.5207,
"num_tokens": 22896778.0,
"step": 330
},
{
"epoch": 0.45702592087312416,
"grad_norm": 0.28811353179764293,
"learning_rate": 3.263393942265168e-05,
"loss": 0.5274,
"num_tokens": 26707422.0,
"step": 335
},
{
"epoch": 0.4638472032742155,
"grad_norm": 0.25952079134670186,
"learning_rate": 3.213826912998838e-05,
"loss": 0.5199,
"num_tokens": 30745942.0,
"step": 340
},
{
"epoch": 0.47066848567530695,
"grad_norm": 0.27171779465267426,
"learning_rate": 3.164023640821719e-05,
"loss": 0.5131,
"num_tokens": 34577418.0,
"step": 345
},
{
"epoch": 0.47748976807639837,
"grad_norm": 0.2756410136692784,
"learning_rate": 3.114009492243721e-05,
"loss": 0.5216,
"num_tokens": 38388303.0,
"step": 350
},
{
"epoch": 0.4843110504774898,
"grad_norm": 0.2862166299702774,
"learning_rate": 3.063809941181321e-05,
"loss": 0.5312,
"num_tokens": 42263578.0,
"step": 355
},
{
"epoch": 0.49113233287858116,
"grad_norm": 0.2776215155538455,
"learning_rate": 3.0134505559828203e-05,
"loss": 0.535,
"num_tokens": 46141558.0,
"step": 360
},
{
"epoch": 0.4979536152796726,
"grad_norm": 0.27939238016420803,
"learning_rate": 2.9629569864055125e-05,
"loss": 0.5129,
"num_tokens": 49793997.0,
"step": 365
},
{
"epoch": 0.504774897680764,
"grad_norm": 0.27656045657678896,
"learning_rate": 2.9123549505513868e-05,
"loss": 0.5149,
"num_tokens": 53751059.0,
"step": 370
},
{
"epoch": 0.5115961800818554,
"grad_norm": 0.2612718920094276,
"learning_rate": 2.8616702217680134e-05,
"loss": 0.5229,
"num_tokens": 57661431.0,
"step": 375
},
{
"epoch": 0.5184174624829468,
"grad_norm": 0.2707456092353873,
"learning_rate": 2.810928615521303e-05,
"loss": 0.5096,
"num_tokens": 61347251.0,
"step": 380
},
{
"epoch": 0.5252387448840382,
"grad_norm": 0.2775857141913436,
"learning_rate": 2.7601559762468022e-05,
"loss": 0.5188,
"num_tokens": 65171476.0,
"step": 385
},
{
"epoch": 0.5320600272851296,
"grad_norm": 0.2749180870715325,
"learning_rate": 2.7093781641862387e-05,
"loss": 0.5214,
"num_tokens": 68967408.0,
"step": 390
},
{
"epoch": 0.538881309686221,
"grad_norm": 0.2736617594464445,
"learning_rate": 2.658621042216021e-05,
"loss": 0.5056,
"num_tokens": 72677498.0,
"step": 395
},
{
"epoch": 0.5457025920873124,
"grad_norm": 0.3724340666807813,
"learning_rate": 2.6079104626743845e-05,
"loss": 0.5275,
"num_tokens": 76420106.0,
"step": 400
},
{
"epoch": 0.5525238744884038,
"grad_norm": 0.30148187800324094,
"learning_rate": 2.5572722541939113e-05,
"loss": 0.536,
"num_tokens": 80241521.0,
"step": 405
},
{
"epoch": 0.5593451568894953,
"grad_norm": 0.2878146937468534,
"learning_rate": 2.5067322085461315e-05,
"loss": 0.5108,
"num_tokens": 84015002.0,
"step": 410
},
{
"epoch": 0.5661664392905866,
"grad_norm": 0.28481014661061205,
"learning_rate": 2.4563160675048846e-05,
"loss": 0.5158,
"num_tokens": 87814661.0,
"step": 415
},
{
"epoch": 0.572987721691678,
"grad_norm": 0.24871308571539336,
"learning_rate": 2.406049509735156e-05,
"loss": 0.5154,
"num_tokens": 91699614.0,
"step": 420
},
{
"epoch": 0.5798090040927695,
"grad_norm": 0.26997468779109357,
"learning_rate": 2.355958137714056e-05,
"loss": 0.5108,
"num_tokens": 95391791.0,
"step": 425
},
{
"epoch": 0.5866302864938608,
"grad_norm": 0.26920569189193183,
"learning_rate": 2.3060674646906004e-05,
"loss": 0.5156,
"num_tokens": 99120584.0,
"step": 430
},
{
"epoch": 0.5934515688949522,
"grad_norm": 0.3031035959753903,
"learning_rate": 2.2564029016909416e-05,
"loss": 0.5049,
"num_tokens": 103056557.0,
"step": 435
},
{
"epoch": 0.6002728512960437,
"grad_norm": 0.2579912157887462,
"learning_rate": 2.2069897445756627e-05,
"loss": 0.5028,
"num_tokens": 106885151.0,
"step": 440
},
{
"epoch": 0.607094133697135,
"grad_norm": 0.2784368496163028,
"learning_rate": 2.1578531611557322e-05,
"loss": 0.5158,
"num_tokens": 110648438.0,
"step": 445
},
{
"epoch": 0.6139154160982264,
"grad_norm": 0.23783581714130014,
"learning_rate": 2.109018178373675e-05,
"loss": 0.5147,
"num_tokens": 114528571.0,
"step": 450
},
{
"epoch": 0.6207366984993179,
"grad_norm": 0.2631301529092457,
"learning_rate": 2.0605096695564973e-05,
"loss": 0.5182,
"num_tokens": 118370098.0,
"step": 455
},
{
"epoch": 0.6275579809004093,
"grad_norm": 0.23896035357377207,
"learning_rate": 2.0123523417468466e-05,
"loss": 0.5115,
"num_tokens": 122107208.0,
"step": 460
},
{
"epoch": 0.6343792633015006,
"grad_norm": 0.23911200744006897,
"learning_rate": 1.9645707231188742e-05,
"loss": 0.5059,
"num_tokens": 125928696.0,
"step": 465
},
{
"epoch": 0.6412005457025921,
"grad_norm": 0.23484644246089895,
"learning_rate": 1.9171891504851925e-05,
"loss": 0.5245,
"num_tokens": 129727094.0,
"step": 470
},
{
"epoch": 0.6480218281036835,
"grad_norm": 0.24843133644098161,
"learning_rate": 1.8702317569013094e-05,
"loss": 0.5005,
"num_tokens": 133479496.0,
"step": 475
},
{
"epoch": 0.654843110504775,
"grad_norm": 0.2298705134606327,
"learning_rate": 1.8237224593738327e-05,
"loss": 0.5027,
"num_tokens": 137152722.0,
"step": 480
},
{
"epoch": 0.6616643929058663,
"grad_norm": 0.22725265159045924,
"learning_rate": 1.7776849466787223e-05,
"loss": 0.5171,
"num_tokens": 141015373.0,
"step": 485
},
{
"epoch": 0.6684856753069577,
"grad_norm": 0.23065567630923026,
"learning_rate": 1.7321426672957896e-05,
"loss": 0.5025,
"num_tokens": 144856028.0,
"step": 490
},
{
"epoch": 0.6753069577080492,
"grad_norm": 0.2535249608874267,
"learning_rate": 1.6871188174655787e-05,
"loss": 0.4957,
"num_tokens": 148620002.0,
"step": 495
},
{
"epoch": 0.6821282401091405,
"grad_norm": 0.2364234203218564,
"learning_rate": 1.6426363293747334e-05,
"loss": 0.5001,
"num_tokens": 152392981.0,
"step": 500
},
{
"epoch": 0.6889495225102319,
"grad_norm": 0.23889880611363135,
"learning_rate": 1.598717859475846e-05,
"loss": 0.5087,
"num_tokens": 156126810.0,
"step": 505
},
{
"epoch": 0.6957708049113234,
"grad_norm": 0.2380901600704711,
"learning_rate": 1.5553857769477553e-05,
"loss": 0.5056,
"num_tokens": 159876193.0,
"step": 510
},
{
"epoch": 0.7025920873124147,
"grad_norm": 0.23096714274240995,
"learning_rate": 1.5126621523021518e-05,
"loss": 0.51,
"num_tokens": 163718273.0,
"step": 515
},
{
"epoch": 0.7094133697135061,
"grad_norm": 0.2614099659251973,
"learning_rate": 1.4705687461423209e-05,
"loss": 0.5222,
"num_tokens": 167448213.0,
"step": 520
},
{
"epoch": 0.7162346521145976,
"grad_norm": 0.24328778538778423,
"learning_rate": 1.4291269980797139e-05,
"loss": 0.5065,
"num_tokens": 171213451.0,
"step": 525
},
{
"epoch": 0.723055934515689,
"grad_norm": 0.22585661056099277,
"learning_rate": 1.3883580158140291e-05,
"loss": 0.5005,
"num_tokens": 174975168.0,
"step": 530
},
{
"epoch": 0.7298772169167803,
"grad_norm": 0.215782449764828,
"learning_rate": 1.3482825643823293e-05,
"loss": 0.5061,
"num_tokens": 178872312.0,
"step": 535
},
{
"epoch": 0.7366984993178718,
"grad_norm": 0.222477664399686,
"learning_rate": 1.3089210555827086e-05,
"loss": 0.5118,
"num_tokens": 182675522.0,
"step": 540
},
{
"epoch": 0.7435197817189632,
"grad_norm": 0.2161279545613622,
"learning_rate": 1.270293537577855e-05,
"loss": 0.5148,
"num_tokens": 186661552.0,
"step": 545
},
{
"epoch": 0.7503410641200545,
"grad_norm": 0.22038438633805268,
"learning_rate": 1.232419684683844e-05,
"loss": 0.4995,
"num_tokens": 190341488.0,
"step": 550
},
{
"epoch": 0.757162346521146,
"grad_norm": 0.22277137142362485,
"learning_rate": 1.1953187873493303e-05,
"loss": 0.5001,
"num_tokens": 193975222.0,
"step": 555
},
{
"epoch": 0.7639836289222374,
"grad_norm": 0.22629794051271598,
"learning_rate": 1.1590097423302684e-05,
"loss": 0.496,
"num_tokens": 197695417.0,
"step": 560
},
{
"epoch": 0.7708049113233287,
"grad_norm": 0.22169310674789464,
"learning_rate": 1.1235110430651421e-05,
"loss": 0.496,
"num_tokens": 201577891.0,
"step": 565
},
{
"epoch": 0.7776261937244202,
"grad_norm": 0.23737206300393107,
"learning_rate": 1.0888407702556284e-05,
"loss": 0.4998,
"num_tokens": 205367508.0,
"step": 570
},
{
"epoch": 0.7844474761255116,
"grad_norm": 0.2393464614182903,
"learning_rate": 1.0550165826574766e-05,
"loss": 0.4997,
"num_tokens": 209296377.0,
"step": 575
},
{
"epoch": 0.791268758526603,
"grad_norm": 0.23434104494246727,
"learning_rate": 1.0220557080862985e-05,
"loss": 0.5149,
"num_tokens": 213335201.0,
"step": 580
},
{
"epoch": 0.7980900409276944,
"grad_norm": 0.2163862184189319,
"learning_rate": 9.899749346428556e-06,
"loss": 0.5021,
"num_tokens": 217141531.0,
"step": 585
},
{
"epoch": 0.8049113233287858,
"grad_norm": 0.20944439534899864,
"learning_rate": 9.587906021623016e-06,
"loss": 0.5161,
"num_tokens": 220976800.0,
"step": 590
},
{
"epoch": 0.8117326057298773,
"grad_norm": 0.240618025701599,
"learning_rate": 9.28518593891749e-06,
"loss": 0.5013,
"num_tokens": 224773409.0,
"step": 595
},
{
"epoch": 0.8185538881309686,
"grad_norm": 0.22360256823330674,
"learning_rate": 8.99174328400385e-06,
"loss": 0.4997,
"num_tokens": 228615374.0,
"step": 600
},
{
"epoch": 0.82537517053206,
"grad_norm": 0.22996095480097895,
"learning_rate": 8.707727517262697e-06,
"loss": 0.5049,
"num_tokens": 232345381.0,
"step": 605
},
{
"epoch": 0.8321964529331515,
"grad_norm": 0.20996010138499008,
"learning_rate": 8.433283297638053e-06,
"loss": 0.4993,
"num_tokens": 236263389.0,
"step": 610
},
{
"epoch": 0.8390177353342428,
"grad_norm": 0.21860095878565663,
"learning_rate": 8.168550408957632e-06,
"loss": 0.497,
"num_tokens": 240081011.0,
"step": 615
},
{
"epoch": 0.8458390177353342,
"grad_norm": 0.2245915474005529,
"learning_rate": 7.91366368873613e-06,
"loss": 0.4944,
"num_tokens": 243860094.0,
"step": 620
},
{
"epoch": 0.8526603001364257,
"grad_norm": 0.2282695347327931,
"learning_rate": 7.66875295949791e-06,
"loss": 0.5105,
"num_tokens": 247690866.0,
"step": 625
},
{
"epoch": 0.859481582537517,
"grad_norm": 0.20590616989328062,
"learning_rate": 7.4339429626539e-06,
"loss": 0.5102,
"num_tokens": 251638405.0,
"step": 630
},
{
"epoch": 0.8663028649386084,
"grad_norm": 0.22956608740204826,
"learning_rate": 7.2093532949665715e-06,
"loss": 0.5081,
"num_tokens": 255461423.0,
"step": 635
},
{
"epoch": 0.8731241473396999,
"grad_norm": 0.20757611016240565,
"learning_rate": 6.995098347635173e-06,
"loss": 0.4933,
"num_tokens": 259349061.0,
"step": 640
},
{
"epoch": 0.8799454297407913,
"grad_norm": 0.20964621674214579,
"learning_rate": 6.791287248032431e-06,
"loss": 0.4966,
"num_tokens": 263182542.0,
"step": 645
},
{
"epoch": 0.8867667121418826,
"grad_norm": 0.21533152300615474,
"learning_rate": 6.598023804122194e-06,
"loss": 0.5039,
"num_tokens": 267195187.0,
"step": 650
},
{
"epoch": 0.8935879945429741,
"grad_norm": 0.20262214665120437,
"learning_rate": 6.415406451586528e-06,
"loss": 0.4994,
"num_tokens": 271154055.0,
"step": 655
},
{
"epoch": 0.9004092769440655,
"grad_norm": 0.20455436089558837,
"learning_rate": 6.243528203689025e-06,
"loss": 0.5033,
"num_tokens": 275099879.0,
"step": 660
},
{
"epoch": 0.9072305593451568,
"grad_norm": 0.2160125533680774,
"learning_rate": 6.0824766039e-06,
"loss": 0.4994,
"num_tokens": 278896745.0,
"step": 665
},
{
"epoch": 0.9140518417462483,
"grad_norm": 0.20835263251242925,
"learning_rate": 5.932333681307571e-06,
"loss": 0.5062,
"num_tokens": 282763597.0,
"step": 670
},
{
"epoch": 0.9208731241473397,
"grad_norm": 0.2056127608186567,
"learning_rate": 5.793175908837471e-06,
"loss": 0.4967,
"num_tokens": 286505617.0,
"step": 675
},
{
"epoch": 0.927694406548431,
"grad_norm": 0.20738306050857,
"learning_rate": 5.665074164302742e-06,
"loss": 0.5064,
"num_tokens": 290255858.0,
"step": 680
},
{
"epoch": 0.9345156889495225,
"grad_norm": 0.1943049409331778,
"learning_rate": 5.548093694303275e-06,
"loss": 0.4919,
"num_tokens": 294083350.0,
"step": 685
},
{
"epoch": 0.9413369713506139,
"grad_norm": 0.21891210362180955,
"learning_rate": 5.442294080993446e-06,
"loss": 0.5059,
"num_tokens": 297956377.0,
"step": 690
},
{
"epoch": 0.9481582537517054,
"grad_norm": 0.2045825966835107,
"learning_rate": 5.347729211734919e-06,
"loss": 0.5034,
"num_tokens": 301755872.0,
"step": 695
},
{
"epoch": 0.9549795361527967,
"grad_norm": 0.20096771321359547,
"learning_rate": 5.264447251649954e-06,
"loss": 0.5057,
"num_tokens": 305542730.0,
"step": 700
},
{
"epoch": 0.9618008185538881,
"grad_norm": 0.20627903017723062,
"learning_rate": 5.192490619089267e-06,
"loss": 0.49,
"num_tokens": 309426949.0,
"step": 705
},
{
"epoch": 0.9686221009549796,
"grad_norm": 0.20761322654529332,
"learning_rate": 5.1318959640269095e-06,
"loss": 0.5005,
"num_tokens": 313113611.0,
"step": 710
},
{
"epoch": 0.975443383356071,
"grad_norm": 0.2108636029151623,
"learning_rate": 5.082694149393189e-06,
"loss": 0.5114,
"num_tokens": 316867560.0,
"step": 715
},
{
"epoch": 0.9822646657571623,
"grad_norm": 0.2031630473017727,
"learning_rate": 5.044910235355121e-06,
"loss": 0.4974,
"num_tokens": 320667324.0,
"step": 720
},
{
"epoch": 0.9890859481582538,
"grad_norm": 0.19315901995067375,
"learning_rate": 5.0185634665524255e-06,
"loss": 0.4934,
"num_tokens": 324513537.0,
"step": 725
},
{
"epoch": 0.9959072305593452,
"grad_norm": 0.1922060062694041,
"learning_rate": 5.003667262295572e-06,
"loss": 0.5015,
"num_tokens": 328290879.0,
"step": 730
},
{
"epoch": 1.0,
"num_tokens": 330600785.0,
"step": 733,
"total_flos": 1147472759488512.0,
"train_loss": 0.30104584323107625,
"train_runtime": 3582.5752,
"train_samples_per_second": 26.164,
"train_steps_per_second": 0.205
}
],
"logging_steps": 5,
"max_steps": 733,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1147472759488512.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}