{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 940,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2127659574468085,
"grad_norm": 15.687945365905762,
"learning_rate": 9.574468085106384e-07,
"loss": 2.1512,
"step": 10
},
{
"epoch": 0.425531914893617,
"grad_norm": 5.410861492156982,
"learning_rate": 2.021276595744681e-06,
"loss": 2.0467,
"step": 20
},
{
"epoch": 0.6382978723404256,
"grad_norm": 5.177760601043701,
"learning_rate": 3.0851063829787237e-06,
"loss": 1.8168,
"step": 30
},
{
"epoch": 0.851063829787234,
"grad_norm": 4.370819568634033,
"learning_rate": 4.148936170212766e-06,
"loss": 1.5723,
"step": 40
},
{
"epoch": 1.0638297872340425,
"grad_norm": 10.194955825805664,
"learning_rate": 5.212765957446809e-06,
"loss": 1.4206,
"step": 50
},
{
"epoch": 1.2765957446808511,
"grad_norm": 4.746403217315674,
"learning_rate": 6.276595744680851e-06,
"loss": 1.3571,
"step": 60
},
{
"epoch": 1.4893617021276595,
"grad_norm": 7.684877395629883,
"learning_rate": 7.340425531914894e-06,
"loss": 1.2962,
"step": 70
},
{
"epoch": 1.702127659574468,
"grad_norm": 4.916018486022949,
"learning_rate": 8.404255319148937e-06,
"loss": 1.2684,
"step": 80
},
{
"epoch": 1.9148936170212765,
"grad_norm": 4.1153740882873535,
"learning_rate": 9.46808510638298e-06,
"loss": 1.2722,
"step": 90
},
{
"epoch": 2.127659574468085,
"grad_norm": 4.440123558044434,
"learning_rate": 9.999138160172624e-06,
"loss": 1.202,
"step": 100
},
{
"epoch": 2.3404255319148937,
"grad_norm": 5.588274002075195,
"learning_rate": 9.99224522409411e-06,
"loss": 1.1704,
"step": 110
},
{
"epoch": 2.5531914893617023,
"grad_norm": 4.915806770324707,
"learning_rate": 9.97846885608885e-06,
"loss": 1.1469,
"step": 120
},
{
"epoch": 2.7659574468085104,
"grad_norm": 15.513427734375,
"learning_rate": 9.957828051355817e-06,
"loss": 1.1509,
"step": 130
},
{
"epoch": 2.978723404255319,
"grad_norm": 3.745392322540283,
"learning_rate": 9.930351269950144e-06,
"loss": 1.1568,
"step": 140
},
{
"epoch": 3.1914893617021276,
"grad_norm": 4.616119861602783,
"learning_rate": 9.896076397541676e-06,
"loss": 1.0829,
"step": 150
},
{
"epoch": 3.404255319148936,
"grad_norm": 3.5734570026397705,
"learning_rate": 9.855050693177286e-06,
"loss": 1.0624,
"step": 160
},
{
"epoch": 3.617021276595745,
"grad_norm": 4.119589328765869,
"learning_rate": 9.807330724118906e-06,
"loss": 1.0512,
"step": 170
},
{
"epoch": 3.829787234042553,
"grad_norm": 4.024175643920898,
"learning_rate": 9.752982287847193e-06,
"loss": 1.0489,
"step": 180
},
{
"epoch": 4.042553191489362,
"grad_norm": 2.776615858078003,
"learning_rate": 9.692080321338317e-06,
"loss": 1.0322,
"step": 190
},
{
"epoch": 4.25531914893617,
"grad_norm": 10.920818328857422,
"learning_rate": 9.624708797739002e-06,
"loss": 0.9571,
"step": 200
},
{
"epoch": 4.468085106382979,
"grad_norm": 3.562532901763916,
"learning_rate": 9.550960610582251e-06,
"loss": 0.9848,
"step": 210
},
{
"epoch": 4.680851063829787,
"grad_norm": 7.035646915435791,
"learning_rate": 9.47093744570344e-06,
"loss": 0.9681,
"step": 220
},
{
"epoch": 4.8936170212765955,
"grad_norm": 12.33655071258545,
"learning_rate": 9.384749641033358e-06,
"loss": 0.9626,
"step": 230
},
{
"epoch": 5.1063829787234045,
"grad_norm": 4.497727394104004,
"learning_rate": 9.292516034461517e-06,
"loss": 0.9159,
"step": 240
},
{
"epoch": 5.319148936170213,
"grad_norm": 2.9264068603515625,
"learning_rate": 9.194363799979517e-06,
"loss": 0.8697,
"step": 250
},
{
"epoch": 5.531914893617021,
"grad_norm": 4.538867950439453,
"learning_rate": 9.090428272330381e-06,
"loss": 0.8683,
"step": 260
},
{
"epoch": 5.74468085106383,
"grad_norm": 3.0127322673797607,
"learning_rate": 8.980852760405645e-06,
"loss": 0.8716,
"step": 270
},
{
"epoch": 5.957446808510638,
"grad_norm": 3.7382822036743164,
"learning_rate": 8.865788349647496e-06,
"loss": 0.8814,
"step": 280
},
{
"epoch": 6.170212765957447,
"grad_norm": 4.30864953994751,
"learning_rate": 8.745393693728395e-06,
"loss": 0.8087,
"step": 290
},
{
"epoch": 6.382978723404255,
"grad_norm": 5.579770565032959,
"learning_rate": 8.619834795795458e-06,
"loss": 0.7699,
"step": 300
},
{
"epoch": 6.595744680851064,
"grad_norm": 3.771517276763916,
"learning_rate": 8.489284779581179e-06,
"loss": 0.7836,
"step": 310
},
{
"epoch": 6.808510638297872,
"grad_norm": 2.9070560932159424,
"learning_rate": 8.353923650696119e-06,
"loss": 0.7645,
"step": 320
},
{
"epoch": 7.0212765957446805,
"grad_norm": 4.816034317016602,
"learning_rate": 8.213938048432697e-06,
"loss": 0.7675,
"step": 330
},
{
"epoch": 7.23404255319149,
"grad_norm": 3.0380640029907227,
"learning_rate": 8.069520988422292e-06,
"loss": 0.6649,
"step": 340
},
{
"epoch": 7.446808510638298,
"grad_norm": 4.518421649932861,
"learning_rate": 7.920871596500473e-06,
"loss": 0.6755,
"step": 350
},
{
"epoch": 7.659574468085106,
"grad_norm": 3.329987049102783,
"learning_rate": 7.768194834147362e-06,
"loss": 0.6654,
"step": 360
},
{
"epoch": 7.872340425531915,
"grad_norm": 6.515069484710693,
"learning_rate": 7.611701215881635e-06,
"loss": 0.681,
"step": 370
},
{
"epoch": 8.085106382978724,
"grad_norm": 5.377772808074951,
"learning_rate": 7.4516065189978625e-06,
"loss": 0.6258,
"step": 380
},
{
"epoch": 8.297872340425531,
"grad_norm": 4.005964279174805,
"learning_rate": 7.288131486047414e-06,
"loss": 0.5657,
"step": 390
},
{
"epoch": 8.51063829787234,
"grad_norm": 4.6427154541015625,
"learning_rate": 7.121501520473137e-06,
"loss": 0.565,
"step": 400
},
{
"epoch": 8.72340425531915,
"grad_norm": 3.043632984161377,
"learning_rate": 6.9519463758174745e-06,
"loss": 0.5643,
"step": 410
},
{
"epoch": 8.936170212765958,
"grad_norm": 3.418354034423828,
"learning_rate": 6.77969983893257e-06,
"loss": 0.5561,
"step": 420
},
{
"epoch": 9.148936170212766,
"grad_norm": 7.3563432693481445,
"learning_rate": 6.604999407629137e-06,
"loss": 0.4931,
"step": 430
},
{
"epoch": 9.361702127659575,
"grad_norm": 2.9742681980133057,
"learning_rate": 6.428085963208567e-06,
"loss": 0.4422,
"step": 440
},
{
"epoch": 9.574468085106384,
"grad_norm": 3.550511360168457,
"learning_rate": 6.249203438329799e-06,
"loss": 0.4542,
"step": 450
},
{
"epoch": 9.787234042553191,
"grad_norm": 3.406008720397949,
"learning_rate": 6.0685984806689055e-06,
"loss": 0.4514,
"step": 460
},
{
"epoch": 10.0,
"grad_norm": 3.04122257232666,
"learning_rate": 5.886520112835128e-06,
"loss": 0.4584,
"step": 470
},
{
"epoch": 10.212765957446809,
"grad_norm": 3.2944118976593018,
"learning_rate": 5.703219389012317e-06,
"loss": 0.3645,
"step": 480
},
{
"epoch": 10.425531914893616,
"grad_norm": 3.405515193939209,
"learning_rate": 5.518949048799176e-06,
"loss": 0.3609,
"step": 490
},
{
"epoch": 10.638297872340425,
"grad_norm": 3.199000358581543,
"learning_rate": 5.3339631687256085e-06,
"loss": 0.3614,
"step": 500
},
{
"epoch": 10.851063829787234,
"grad_norm": 3.5655815601348877,
"learning_rate": 5.148516811925684e-06,
"loss": 0.3531,
"step": 510
},
{
"epoch": 11.063829787234043,
"grad_norm": 3.419621467590332,
"learning_rate": 4.962865676450239e-06,
"loss": 0.3355,
"step": 520
},
{
"epoch": 11.27659574468085,
"grad_norm": 3.349931478500366,
"learning_rate": 4.777265742704039e-06,
"loss": 0.2772,
"step": 530
},
{
"epoch": 11.48936170212766,
"grad_norm": 3.3828463554382324,
"learning_rate": 4.591972920493638e-06,
"loss": 0.2713,
"step": 540
},
{
"epoch": 11.702127659574469,
"grad_norm": 4.099218845367432,
"learning_rate": 4.40724269617256e-06,
"loss": 0.2857,
"step": 550
},
{
"epoch": 11.914893617021276,
"grad_norm": 2.9123198986053467,
"learning_rate": 4.223329780370359e-06,
"loss": 0.2752,
"step": 560
},
{
"epoch": 12.127659574468085,
"grad_norm": 3.570718765258789,
"learning_rate": 4.04048775679127e-06,
"loss": 0.2451,
"step": 570
},
{
"epoch": 12.340425531914894,
"grad_norm": 3.2481930255889893,
"learning_rate": 3.858968732566685e-06,
"loss": 0.2136,
"step": 580
},
{
"epoch": 12.553191489361701,
"grad_norm": 3.2765166759490967,
"learning_rate": 3.6790229906435706e-06,
"loss": 0.2117,
"step": 590
},
{
"epoch": 12.76595744680851,
"grad_norm": 3.1073427200317383,
"learning_rate": 3.5008986446881088e-06,
"loss": 0.2117,
"step": 600
},
{
"epoch": 12.97872340425532,
"grad_norm": 3.716876745223999,
"learning_rate": 3.3248412969804065e-06,
"loss": 0.2113,
"step": 610
},
{
"epoch": 13.191489361702128,
"grad_norm": 2.914933443069458,
"learning_rate": 3.1510936997719557e-06,
"loss": 0.1751,
"step": 620
},
{
"epoch": 13.404255319148936,
"grad_norm": 2.8505117893218994,
"learning_rate": 2.9798954205727886e-06,
"loss": 0.1643,
"step": 630
},
{
"epoch": 13.617021276595745,
"grad_norm": 3.4922451972961426,
"learning_rate": 2.811482511829842e-06,
"loss": 0.1669,
"step": 640
},
{
"epoch": 13.829787234042554,
"grad_norm": 2.6760811805725098,
"learning_rate": 2.6460871854519594e-06,
"loss": 0.1588,
"step": 650
},
{
"epoch": 14.042553191489361,
"grad_norm": 2.973904848098755,
"learning_rate": 2.483937492630345e-06,
"loss": 0.159,
"step": 660
},
{
"epoch": 14.25531914893617,
"grad_norm": 2.8486647605895996,
"learning_rate": 2.3252570093959e-06,
"loss": 0.1317,
"step": 670
},
{
"epoch": 14.46808510638298,
"grad_norm": 2.7211029529571533,
"learning_rate": 2.1702645283470238e-06,
"loss": 0.1284,
"step": 680
},
{
"epoch": 14.680851063829786,
"grad_norm": 3.1470119953155518,
"learning_rate": 2.0191737569729492e-06,
"loss": 0.133,
"step": 690
},
{
"epoch": 14.893617021276595,
"grad_norm": 2.750349283218384,
"learning_rate": 1.872193022988526e-06,
"loss": 0.1279,
"step": 700
},
{
"epoch": 15.106382978723405,
"grad_norm": 3.102318048477173,
"learning_rate": 1.7295249870867898e-06,
"loss": 0.1127,
"step": 710
},
{
"epoch": 15.319148936170214,
"grad_norm": 2.9801065921783447,
"learning_rate": 1.5913663635053578e-06,
"loss": 0.1026,
"step": 720
},
{
"epoch": 15.53191489361702,
"grad_norm": 2.6068978309631348,
"learning_rate": 1.457907648791943e-06,
"loss": 0.102,
"step": 730
},
{
"epoch": 15.74468085106383,
"grad_norm": 2.8479115962982178,
"learning_rate": 1.329332859142967e-06,
"loss": 0.104,
"step": 740
},
{
"epoch": 15.957446808510639,
"grad_norm": 2.2816240787506104,
"learning_rate": 1.205819276677464e-06,
"loss": 0.1081,
"step": 750
},
{
"epoch": 16.170212765957448,
"grad_norm": 3.1890599727630615,
"learning_rate": 1.0875372049960697e-06,
"loss": 0.0938,
"step": 760
},
{
"epoch": 16.382978723404257,
"grad_norm": 2.4445719718933105,
"learning_rate": 9.746497343621857e-07,
"loss": 0.0807,
"step": 770
},
{
"epoch": 16.595744680851062,
"grad_norm": 2.440563678741455,
"learning_rate": 8.673125168290713e-07,
"loss": 0.0899,
"step": 780
},
{
"epoch": 16.80851063829787,
"grad_norm": 2.6344189643859863,
"learning_rate": 7.656735516229125e-07,
"loss": 0.0855,
"step": 790
},
{
"epoch": 17.02127659574468,
"grad_norm": 2.084998369216919,
"learning_rate": 6.698729810778065e-07,
"loss": 0.084,
"step": 800
},
{
"epoch": 17.23404255319149,
"grad_norm": 2.7588119506835938,
"learning_rate": 5.800428974040311e-07,
"loss": 0.0725,
"step": 810
},
{
"epoch": 17.4468085106383,
"grad_norm": 2.3213772773742676,
"learning_rate": 4.963071605560144e-07,
"loss": 0.0758,
"step": 820
},
{
"epoch": 17.659574468085108,
"grad_norm": 2.5637381076812744,
"learning_rate": 4.187812274511427e-07,
"loss": 0.0719,
"step": 830
},
{
"epoch": 17.872340425531917,
"grad_norm": 2.3422341346740723,
"learning_rate": 3.4757199277490106e-07,
"loss": 0.0709,
"step": 840
},
{
"epoch": 18.085106382978722,
"grad_norm": 1.787158727645874,
"learning_rate": 2.8277764159181484e-07,
"loss": 0.0697,
"step": 850
},
{
"epoch": 18.29787234042553,
"grad_norm": 1.931524634361267,
"learning_rate": 2.2448751396543788e-07,
"loss": 0.066,
"step": 860
},
{
"epoch": 18.51063829787234,
"grad_norm": 2.025407075881958,
"learning_rate": 1.7278198177405614e-07,
"loss": 0.0627,
"step": 870
},
{
"epoch": 18.72340425531915,
"grad_norm": 2.224090337753296,
"learning_rate": 1.2773233789193816e-07,
"loss": 0.0696,
"step": 880
},
{
"epoch": 18.93617021276596,
"grad_norm": 1.879887342453003,
"learning_rate": 8.940069788894389e-08,
"loss": 0.0617,
"step": 890
},
{
"epoch": 19.148936170212767,
"grad_norm": 1.786503553390503,
"learning_rate": 5.783991438403802e-08,
"loss": 0.0614,
"step": 900
},
{
"epoch": 19.361702127659573,
"grad_norm": 1.754013180732727,
"learning_rate": 3.309350417077972e-08,
"loss": 0.0664,
"step": 910
},
{
"epoch": 19.574468085106382,
"grad_norm": 1.7814531326293945,
"learning_rate": 1.5195588215283773e-08,
"loss": 0.0615,
"step": 920
},
{
"epoch": 19.78723404255319,
"grad_norm": 1.6456085443496704,
"learning_rate": 4.170844609387992e-09,
"loss": 0.0549,
"step": 930
},
{
"epoch": 20.0,
"grad_norm": 1.808290719985962,
"learning_rate": 3.447454388127991e-11,
"loss": 0.0604,
"step": 940
}
],
"logging_steps": 10,
"max_steps": 940,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.970741280306627e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}