{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 369,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0408997955010225,
"grad_norm": 7.998523235321045,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.5733,
"step": 5
},
{
"epoch": 0.081799591002045,
"grad_norm": 9.153129577636719,
"learning_rate": 7.500000000000001e-06,
"loss": 0.5196,
"step": 10
},
{
"epoch": 0.12269938650306748,
"grad_norm": 8.34139347076416,
"learning_rate": 9.999225623611208e-06,
"loss": 0.5806,
"step": 15
},
{
"epoch": 0.16359918200409,
"grad_norm": 7.1141204833984375,
"learning_rate": 9.990516643685222e-06,
"loss": 0.4941,
"step": 20
},
{
"epoch": 0.20449897750511248,
"grad_norm": 8.44301700592041,
"learning_rate": 9.972147627352593e-06,
"loss": 0.6074,
"step": 25
},
{
"epoch": 0.24539877300613497,
"grad_norm": 6.837522029876709,
"learning_rate": 9.944154131125643e-06,
"loss": 0.6176,
"step": 30
},
{
"epoch": 0.28629856850715746,
"grad_norm": 6.722048282623291,
"learning_rate": 9.906590341416725e-06,
"loss": 0.6011,
"step": 35
},
{
"epoch": 0.32719836400818,
"grad_norm": 6.896355152130127,
"learning_rate": 9.859528969650739e-06,
"loss": 0.5717,
"step": 40
},
{
"epoch": 0.36809815950920244,
"grad_norm": 7.0612945556640625,
"learning_rate": 9.803061111519188e-06,
"loss": 0.5783,
"step": 45
},
{
"epoch": 0.40899795501022496,
"grad_norm": 6.113592147827148,
"learning_rate": 9.737296070648187e-06,
"loss": 0.6013,
"step": 50
},
{
"epoch": 0.4498977505112474,
"grad_norm": 6.054924488067627,
"learning_rate": 9.66236114702178e-06,
"loss": 0.612,
"step": 55
},
{
"epoch": 0.49079754601226994,
"grad_norm": 5.854165554046631,
"learning_rate": 9.57840139057007e-06,
"loss": 0.5835,
"step": 60
},
{
"epoch": 0.5316973415132924,
"grad_norm": 6.945219993591309,
"learning_rate": 9.485579320399184e-06,
"loss": 0.5709,
"step": 65
},
{
"epoch": 0.5725971370143149,
"grad_norm": 6.699445724487305,
"learning_rate": 9.384074610206495e-06,
"loss": 0.6038,
"step": 70
},
{
"epoch": 0.6134969325153374,
"grad_norm": 6.41201114654541,
"learning_rate": 9.274083740490097e-06,
"loss": 0.6221,
"step": 75
},
{
"epoch": 0.65439672801636,
"grad_norm": 5.748655796051025,
"learning_rate": 9.155819618225707e-06,
"loss": 0.6027,
"step": 80
},
{
"epoch": 0.6952965235173824,
"grad_norm": 6.205794334411621,
"learning_rate": 9.029511164747175e-06,
"loss": 0.5737,
"step": 85
},
{
"epoch": 0.7361963190184049,
"grad_norm": 6.745859622955322,
"learning_rate": 8.895402872628352e-06,
"loss": 0.6281,
"step": 90
},
{
"epoch": 0.7770961145194274,
"grad_norm": 5.8365302085876465,
"learning_rate": 8.753754332424047e-06,
"loss": 0.5721,
"step": 95
},
{
"epoch": 0.8179959100204499,
"grad_norm": 6.5638532638549805,
"learning_rate": 8.604839730186125e-06,
"loss": 0.6225,
"step": 100
},
{
"epoch": 0.8588957055214724,
"grad_norm": 9.908438682556152,
"learning_rate": 8.448947316727444e-06,
"loss": 0.6062,
"step": 105
},
{
"epoch": 0.8997955010224948,
"grad_norm": 7.083659648895264,
"learning_rate": 8.286378849660895e-06,
"loss": 0.5851,
"step": 110
},
{
"epoch": 0.9406952965235174,
"grad_norm": 5.502669811248779,
"learning_rate": 8.117449009293668e-06,
"loss": 0.5388,
"step": 115
},
{
"epoch": 0.9815950920245399,
"grad_norm": 5.813559055328369,
"learning_rate": 7.942484789507282e-06,
"loss": 0.6135,
"step": 120
},
{
"epoch": 1.016359918200409,
"grad_norm": 4.387826442718506,
"learning_rate": 7.76182486480253e-06,
"loss": 0.3565,
"step": 125
},
{
"epoch": 1.0572597137014315,
"grad_norm": 3.389594793319702,
"learning_rate": 7.57581893473448e-06,
"loss": 0.185,
"step": 130
},
{
"epoch": 1.098159509202454,
"grad_norm": 3.6928842067718506,
"learning_rate": 7.38482704700653e-06,
"loss": 0.1724,
"step": 135
},
{
"epoch": 1.1390593047034765,
"grad_norm": 4.740786552429199,
"learning_rate": 7.1892189005337494e-06,
"loss": 0.1397,
"step": 140
},
{
"epoch": 1.179959100204499,
"grad_norm": 6.11871862411499,
"learning_rate": 6.989373129824605e-06,
"loss": 0.1564,
"step": 145
},
{
"epoch": 1.2208588957055215,
"grad_norm": 4.457100868225098,
"learning_rate": 6.785676572066225e-06,
"loss": 0.1557,
"step": 150
},
{
"epoch": 1.261758691206544,
"grad_norm": 4.849860191345215,
"learning_rate": 6.5785235183319266e-06,
"loss": 0.142,
"step": 155
},
{
"epoch": 1.3026584867075663,
"grad_norm": 4.870535373687744,
"learning_rate": 6.368314950360416e-06,
"loss": 0.1623,
"step": 160
},
{
"epoch": 1.343558282208589,
"grad_norm": 4.23346471786499,
"learning_rate": 6.155457764384001e-06,
"loss": 0.1615,
"step": 165
},
{
"epoch": 1.3844580777096114,
"grad_norm": 4.38654088973999,
"learning_rate": 5.940363983508257e-06,
"loss": 0.1759,
"step": 170
},
{
"epoch": 1.425357873210634,
"grad_norm": 4.874055862426758,
"learning_rate": 5.723449960167703e-06,
"loss": 0.1886,
"step": 175
},
{
"epoch": 1.4662576687116564,
"grad_norm": 4.817355155944824,
"learning_rate": 5.50513557020129e-06,
"loss": 0.165,
"step": 180
},
{
"epoch": 1.5071574642126788,
"grad_norm": 4.6559953689575195,
"learning_rate": 5.285843400107722e-06,
"loss": 0.1718,
"step": 185
},
{
"epoch": 1.5480572597137015,
"grad_norm": 4.32926082611084,
"learning_rate": 5.065997929053795e-06,
"loss": 0.1644,
"step": 190
},
{
"epoch": 1.588957055214724,
"grad_norm": 4.097021102905273,
"learning_rate": 4.846024707219149e-06,
"loss": 0.1648,
"step": 195
},
{
"epoch": 1.6298568507157465,
"grad_norm": 5.297605037689209,
"learning_rate": 4.626349532067879e-06,
"loss": 0.1847,
"step": 200
},
{
"epoch": 1.670756646216769,
"grad_norm": 3.8317348957061768,
"learning_rate": 4.407397624141487e-06,
"loss": 0.1595,
"step": 205
},
{
"epoch": 1.7116564417177913,
"grad_norm": 5.160454750061035,
"learning_rate": 4.1895928039685635e-06,
"loss": 0.1692,
"step": 210
},
{
"epoch": 1.752556237218814,
"grad_norm": 3.6158623695373535,
"learning_rate": 3.973356671684455e-06,
"loss": 0.1531,
"step": 215
},
{
"epoch": 1.7934560327198366,
"grad_norm": 4.5961503982543945,
"learning_rate": 3.7591077909488817e-06,
"loss": 0.1621,
"step": 220
},
{
"epoch": 1.834355828220859,
"grad_norm": 4.000308036804199,
"learning_rate": 3.54726087874122e-06,
"loss": 0.1456,
"step": 225
},
{
"epoch": 1.8752556237218814,
"grad_norm": 4.329658031463623,
"learning_rate": 3.3382260026017027e-06,
"loss": 0.1514,
"step": 230
},
{
"epoch": 1.9161554192229038,
"grad_norm": 4.9972028732299805,
"learning_rate": 3.132407786872442e-06,
"loss": 0.1659,
"step": 235
},
{
"epoch": 1.9570552147239264,
"grad_norm": 4.748896598815918,
"learning_rate": 2.9302046294747498e-06,
"loss": 0.148,
"step": 240
},
{
"epoch": 1.997955010224949,
"grad_norm": 5.426475524902344,
"learning_rate": 2.7320079307387788e-06,
"loss": 0.1588,
"step": 245
},
{
"epoch": 2.032719836400818,
"grad_norm": 2.0547497272491455,
"learning_rate": 2.538201335778289e-06,
"loss": 0.0437,
"step": 250
},
{
"epoch": 2.0736196319018405,
"grad_norm": 1.7896406650543213,
"learning_rate": 2.3491599918769854e-06,
"loss": 0.0408,
"step": 255
},
{
"epoch": 2.114519427402863,
"grad_norm": 3.0377840995788574,
"learning_rate": 2.165249822323943e-06,
"loss": 0.0416,
"step": 260
},
{
"epoch": 2.1554192229038853,
"grad_norm": 2.5748441219329834,
"learning_rate": 1.9868268181037186e-06,
"loss": 0.039,
"step": 265
},
{
"epoch": 2.196319018404908,
"grad_norm": 3.0743508338928223,
"learning_rate": 1.814236348812211e-06,
"loss": 0.0536,
"step": 270
},
{
"epoch": 2.2372188139059306,
"grad_norm": 2.9512462615966797,
"learning_rate": 1.6478124941321123e-06,
"loss": 0.0385,
"step": 275
},
{
"epoch": 2.278118609406953,
"grad_norm": 3.284522533416748,
"learning_rate": 1.4878773971620076e-06,
"loss": 0.0348,
"step": 280
},
{
"epoch": 2.3190184049079754,
"grad_norm": 1.6712443828582764,
"learning_rate": 1.3347406408508695e-06,
"loss": 0.0248,
"step": 285
},
{
"epoch": 2.359918200408998,
"grad_norm": 3.1134347915649414,
"learning_rate": 1.1886986487449476e-06,
"loss": 0.0391,
"step": 290
},
{
"epoch": 2.40081799591002,
"grad_norm": 2.1109068393707275,
"learning_rate": 1.0500341112070605e-06,
"loss": 0.0369,
"step": 295
},
{
"epoch": 2.441717791411043,
"grad_norm": 2.5719072818756104,
"learning_rate": 9.190154382188921e-07,
"loss": 0.035,
"step": 300
},
{
"epoch": 2.4826175869120655,
"grad_norm": 2.971503496170044,
"learning_rate": 7.958962398255293e-07,
"loss": 0.0312,
"step": 305
},
{
"epoch": 2.523517382413088,
"grad_norm": 3.7453150749206543,
"learning_rate": 6.809148352279182e-07,
"loss": 0.0381,
"step": 310
},
{
"epoch": 2.5644171779141103,
"grad_norm": 2.350411891937256,
"learning_rate": 5.742937914734853e-07,
"loss": 0.0317,
"step": 315
},
{
"epoch": 2.6053169734151327,
"grad_norm": 2.607921838760376,
"learning_rate": 4.762394926378477e-07,
"loss": 0.0342,
"step": 320
},
{
"epoch": 2.6462167689161555,
"grad_norm": 2.516187906265259,
"learning_rate": 3.869417403315856e-07,
"loss": 0.0367,
"step": 325
},
{
"epoch": 2.687116564417178,
"grad_norm": 3.3790230751037598,
"learning_rate": 3.065733863053072e-07,
"loss": 0.0321,
"step": 330
},
{
"epoch": 2.7280163599182004,
"grad_norm": 1.9204952716827393,
"learning_rate": 2.3528999786421758e-07,
"loss": 0.0251,
"step": 335
},
{
"epoch": 2.7689161554192228,
"grad_norm": 2.1479649543762207,
"learning_rate": 1.7322955673980678e-07,
"loss": 0.0351,
"step": 340
},
{
"epoch": 2.809815950920245,
"grad_norm": 4.126728057861328,
"learning_rate": 1.2051219200156394e-07,
"loss": 0.0439,
"step": 345
},
{
"epoch": 2.850715746421268,
"grad_norm": 2.021738052368164,
"learning_rate": 7.723994752570463e-08,
"loss": 0.0299,
"step": 350
},
{
"epoch": 2.8916155419222904,
"grad_norm": 3.108388662338257,
"learning_rate": 4.3496584471016125e-08,
"loss": 0.0396,
"step": 355
},
{
"epoch": 2.932515337423313,
"grad_norm": 2.0311710834503174,
"learning_rate": 1.9347419144180035e-08,
"loss": 0.0291,
"step": 360
},
{
"epoch": 2.9734151329243352,
"grad_norm": 3.245495557785034,
"learning_rate": 4.839196568388049e-09,
"loss": 0.034,
"step": 365
},
{
"epoch": 3.0,
"step": 369,
"total_flos": 1.1649211674300252e+18,
"train_loss": 0.2605089543634637,
"train_runtime": 3793.9194,
"train_samples_per_second": 3.09,
"train_steps_per_second": 0.097
}
],
"logging_steps": 5,
"max_steps": 369,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1649211674300252e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}