{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 59.7710843373494,
"eval_steps": 10,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.7710843373493976,
"grad_norm": 3.914616675051536,
"learning_rate": 5.000000000000001e-07,
"loss": 0.6485056281089783,
"memory(GiB)": 76.1,
"step": 1,
"token_acc": 0.8288129939443704,
"train_speed(iter/s)": 0.00248
},
{
"epoch": 1.7710843373493976,
"grad_norm": 7.135952401058273,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.306803822517395,
"memory(GiB)": 77.59,
"step": 2,
"token_acc": 0.8355539032171605,
"train_speed(iter/s)": 0.002291
},
{
"epoch": 2.7710843373493974,
"grad_norm": 7.25317611851469,
"learning_rate": 1.5e-06,
"loss": 1.2851154804229736,
"memory(GiB)": 77.59,
"step": 3,
"token_acc": 0.834915035973237,
"train_speed(iter/s)": 0.002247
},
{
"epoch": 3.7710843373493974,
"grad_norm": 6.865015999508222,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.290574073791504,
"memory(GiB)": 77.59,
"step": 4,
"token_acc": 0.8338767344065504,
"train_speed(iter/s)": 0.002214
},
{
"epoch": 4.771084337349397,
"grad_norm": 6.57767604145361,
"learning_rate": 2.5e-06,
"loss": 1.261496901512146,
"memory(GiB)": 77.59,
"step": 5,
"token_acc": 0.8348027461456405,
"train_speed(iter/s)": 0.0022
},
{
"epoch": 5.771084337349397,
"grad_norm": 5.131561904948073,
"learning_rate": 3e-06,
"loss": 1.20093834400177,
"memory(GiB)": 77.59,
"step": 6,
"token_acc": 0.8447540089861448,
"train_speed(iter/s)": 0.002191
},
{
"epoch": 6.771084337349397,
"grad_norm": 4.169361922366313,
"learning_rate": 3.5e-06,
"loss": 1.118795394897461,
"memory(GiB)": 77.59,
"step": 7,
"token_acc": 0.845426595733518,
"train_speed(iter/s)": 0.002187
},
{
"epoch": 7.771084337349397,
"grad_norm": 4.513043229540615,
"learning_rate": 4.000000000000001e-06,
"loss": 1.0338046550750732,
"memory(GiB)": 77.59,
"step": 8,
"token_acc": 0.8524168712340995,
"train_speed(iter/s)": 0.00218
},
{
"epoch": 8.771084337349398,
"grad_norm": 5.185455486257246,
"learning_rate": 4.5e-06,
"loss": 0.9289287328720093,
"memory(GiB)": 77.59,
"step": 9,
"token_acc": 0.8699944903581267,
"train_speed(iter/s)": 0.002177
},
{
"epoch": 9.771084337349398,
"grad_norm": 3.6878315103563324,
"learning_rate": 5e-06,
"loss": 0.9007519483566284,
"memory(GiB)": 77.59,
"step": 10,
"token_acc": 0.8636064441638881,
"train_speed(iter/s)": 0.002176
},
{
"epoch": 10.771084337349398,
"grad_norm": 3.153907065444448,
"learning_rate": 4.999658262481173e-06,
"loss": 0.8321974277496338,
"memory(GiB)": 77.59,
"step": 11,
"token_acc": 0.8710321094284695,
"train_speed(iter/s)": 0.002152
},
{
"epoch": 11.771084337349398,
"grad_norm": 2.203676742176422,
"learning_rate": 4.998633143352315e-06,
"loss": 0.7872496247291565,
"memory(GiB)": 77.59,
"step": 12,
"token_acc": 0.8761502671032225,
"train_speed(iter/s)": 0.002149
},
{
"epoch": 12.771084337349398,
"grad_norm": 2.4327146254784084,
"learning_rate": 4.9969249228707625e-06,
"loss": 0.7419657707214355,
"memory(GiB)": 77.59,
"step": 13,
"token_acc": 0.8786807478287534,
"train_speed(iter/s)": 0.002148
},
{
"epoch": 13.771084337349398,
"grad_norm": 1.8937402046126173,
"learning_rate": 4.994534068046936e-06,
"loss": 0.709729790687561,
"memory(GiB)": 77.59,
"step": 14,
"token_acc": 0.8879921788137868,
"train_speed(iter/s)": 0.002151
},
{
"epoch": 14.771084337349398,
"grad_norm": 1.4897225411877277,
"learning_rate": 4.991461232516675e-06,
"loss": 0.676007866859436,
"memory(GiB)": 77.59,
"step": 15,
"token_acc": 0.8962045012503473,
"train_speed(iter/s)": 0.00215
},
{
"epoch": 15.771084337349398,
"grad_norm": 1.297539359495755,
"learning_rate": 4.987707256362529e-06,
"loss": 0.6474949717521667,
"memory(GiB)": 77.59,
"step": 16,
"token_acc": 0.8947659474239368,
"train_speed(iter/s)": 0.002148
},
{
"epoch": 16.771084337349397,
"grad_norm": 0.7199351026372981,
"learning_rate": 4.983273165884096e-06,
"loss": 0.6222354173660278,
"memory(GiB)": 77.59,
"step": 17,
"token_acc": 0.9017643862580863,
"train_speed(iter/s)": 0.00215
},
{
"epoch": 17.771084337349397,
"grad_norm": 1.4007282443863567,
"learning_rate": 4.978160173317439e-06,
"loss": 0.6050044298171997,
"memory(GiB)": 77.59,
"step": 18,
"token_acc": 0.899228721854176,
"train_speed(iter/s)": 0.00215
},
{
"epoch": 18.771084337349397,
"grad_norm": 1.4351431315774181,
"learning_rate": 4.972369676503672e-06,
"loss": 0.5903453230857849,
"memory(GiB)": 77.59,
"step": 19,
"token_acc": 0.9062169786627631,
"train_speed(iter/s)": 0.002149
},
{
"epoch": 19.771084337349397,
"grad_norm": 1.1929137455373535,
"learning_rate": 4.965903258506806e-06,
"loss": 0.5688210725784302,
"memory(GiB)": 77.59,
"step": 20,
"token_acc": 0.9079767112253118,
"train_speed(iter/s)": 0.002148
},
{
"epoch": 20.771084337349397,
"grad_norm": 1.1732494606890316,
"learning_rate": 4.9587626871809564e-06,
"loss": 0.543393611907959,
"memory(GiB)": 77.59,
"step": 21,
"token_acc": 0.9126149389091825,
"train_speed(iter/s)": 0.002136
},
{
"epoch": 21.771084337349397,
"grad_norm": 1.0573555070054292,
"learning_rate": 4.950949914687024e-06,
"loss": 0.5107942223548889,
"memory(GiB)": 77.59,
"step": 22,
"token_acc": 0.913213028660819,
"train_speed(iter/s)": 0.002136
},
{
"epoch": 22.771084337349397,
"grad_norm": 0.9119223186948517,
"learning_rate": 4.942467076958999e-06,
"loss": 0.48675835132598877,
"memory(GiB)": 77.59,
"step": 23,
"token_acc": 0.916867432182973,
"train_speed(iter/s)": 0.002137
},
{
"epoch": 23.771084337349397,
"grad_norm": 1.0782070696527255,
"learning_rate": 4.933316493120015e-06,
"loss": 0.5053229331970215,
"memory(GiB)": 77.59,
"step": 24,
"token_acc": 0.9218187118672303,
"train_speed(iter/s)": 0.002139
},
{
"epoch": 24.771084337349397,
"grad_norm": 0.9655818937470999,
"learning_rate": 4.923500664848327e-06,
"loss": 0.5017877817153931,
"memory(GiB)": 77.59,
"step": 25,
"token_acc": 0.9221839217084671,
"train_speed(iter/s)": 0.002138
},
{
"epoch": 25.771084337349397,
"grad_norm": 0.9404583758277602,
"learning_rate": 4.913022275693372e-06,
"loss": 0.4680900275707245,
"memory(GiB)": 77.59,
"step": 26,
"token_acc": 0.9232181127591402,
"train_speed(iter/s)": 0.002139
},
{
"epoch": 26.771084337349397,
"grad_norm": 0.9566563796831183,
"learning_rate": 4.901884190342121e-06,
"loss": 0.4565889239311218,
"memory(GiB)": 77.59,
"step": 27,
"token_acc": 0.9250697693507346,
"train_speed(iter/s)": 0.002138
},
{
"epoch": 27.771084337349397,
"grad_norm": 0.7703532186007063,
"learning_rate": 4.890089453835894e-06,
"loss": 0.43708479404449463,
"memory(GiB)": 77.59,
"step": 28,
"token_acc": 0.9259928949775574,
"train_speed(iter/s)": 0.002139
},
{
"epoch": 28.771084337349397,
"grad_norm": 0.9233409831228016,
"learning_rate": 4.8776412907378845e-06,
"loss": 0.4270290732383728,
"memory(GiB)": 77.59,
"step": 29,
"token_acc": 0.9246415671992716,
"train_speed(iter/s)": 0.00214
},
{
"epoch": 29.771084337349397,
"grad_norm": 0.9965352268428146,
"learning_rate": 4.864543104251587e-06,
"loss": 0.41402751207351685,
"memory(GiB)": 77.59,
"step": 30,
"token_acc": 0.9240786009778112,
"train_speed(iter/s)": 0.002139
},
{
"epoch": 30.771084337349397,
"grad_norm": 0.9358295902337755,
"learning_rate": 4.850798475290403e-06,
"loss": 0.39457955956459045,
"memory(GiB)": 77.59,
"step": 31,
"token_acc": 0.9368182401505754,
"train_speed(iter/s)": 0.002131
},
{
"epoch": 31.771084337349397,
"grad_norm": 0.9813539670443086,
"learning_rate": 4.836411161498653e-06,
"loss": 0.38136690855026245,
"memory(GiB)": 77.59,
"step": 32,
"token_acc": 0.9375963637091769,
"train_speed(iter/s)": 0.002133
},
{
"epoch": 32.7710843373494,
"grad_norm": 0.4679577449591144,
"learning_rate": 4.821385096224268e-06,
"loss": 0.36845850944519043,
"memory(GiB)": 77.59,
"step": 33,
"token_acc": 0.9348571791559913,
"train_speed(iter/s)": 0.002134
},
{
"epoch": 33.7710843373494,
"grad_norm": 0.9212511288160155,
"learning_rate": 4.8057243874434625e-06,
"loss": 0.35925933718681335,
"memory(GiB)": 77.59,
"step": 34,
"token_acc": 0.9376081366560561,
"train_speed(iter/s)": 0.002133
},
{
"epoch": 34.7710843373494,
"grad_norm": 1.0561026403132139,
"learning_rate": 4.789433316637644e-06,
"loss": 0.3401709794998169,
"memory(GiB)": 77.59,
"step": 35,
"token_acc": 0.9403078788403664,
"train_speed(iter/s)": 0.002134
},
{
"epoch": 35.7710843373494,
"grad_norm": 0.936417548403036,
"learning_rate": 4.772516337622907e-06,
"loss": 0.3241081237792969,
"memory(GiB)": 77.59,
"step": 36,
"token_acc": 0.9484973388290848,
"train_speed(iter/s)": 0.002135
},
{
"epoch": 36.7710843373494,
"grad_norm": 1.116983283196546,
"learning_rate": 4.754978075332398e-06,
"loss": 0.30902814865112305,
"memory(GiB)": 77.59,
"step": 37,
"token_acc": 0.9476456995060315,
"train_speed(iter/s)": 0.002135
},
{
"epoch": 37.7710843373494,
"grad_norm": 0.9993160029154087,
"learning_rate": 4.736823324551909e-06,
"loss": 0.308858722448349,
"memory(GiB)": 77.59,
"step": 38,
"token_acc": 0.9504927069901695,
"train_speed(iter/s)": 0.002135
},
{
"epoch": 38.7710843373494,
"grad_norm": 0.9122824683879396,
"learning_rate": 4.71805704860903e-06,
"loss": 0.27843528985977173,
"memory(GiB)": 77.59,
"step": 39,
"token_acc": 0.9496681563352376,
"train_speed(iter/s)": 0.002136
},
{
"epoch": 39.7710843373494,
"grad_norm": 1.009388916794573,
"learning_rate": 4.698684378016223e-06,
"loss": 0.25660544633865356,
"memory(GiB)": 77.59,
"step": 40,
"token_acc": 0.9552901130798869,
"train_speed(iter/s)": 0.002137
},
{
"epoch": 40.7710843373494,
"grad_norm": 1.1895642620511482,
"learning_rate": 4.678710609068193e-06,
"loss": 0.2424250692129135,
"memory(GiB)": 77.59,
"step": 41,
"token_acc": 0.9549429605785101,
"train_speed(iter/s)": 0.002131
},
{
"epoch": 41.7710843373494,
"grad_norm": 1.1193041843299223,
"learning_rate": 4.658141202393935e-06,
"loss": 0.23843874037265778,
"memory(GiB)": 77.59,
"step": 42,
"token_acc": 0.9608553608553608,
"train_speed(iter/s)": 0.002131
},
{
"epoch": 42.7710843373494,
"grad_norm": 1.110610217839956,
"learning_rate": 4.636981781463848e-06,
"loss": 0.210187166929245,
"memory(GiB)": 77.59,
"step": 43,
"token_acc": 0.9653150186596419,
"train_speed(iter/s)": 0.002132
},
{
"epoch": 43.7710843373494,
"grad_norm": 0.8486274696165886,
"learning_rate": 4.615238131052339e-06,
"loss": 0.21901345252990723,
"memory(GiB)": 77.59,
"step": 44,
"token_acc": 0.9653416261042005,
"train_speed(iter/s)": 0.002132
},
{
"epoch": 44.7710843373494,
"grad_norm": 1.0423804412798912,
"learning_rate": 4.592916195656322e-06,
"loss": 0.17482870817184448,
"memory(GiB)": 77.59,
"step": 45,
"token_acc": 0.9674520144027672,
"train_speed(iter/s)": 0.002133
},
{
"epoch": 45.7710843373494,
"grad_norm": 1.2841486053816782,
"learning_rate": 4.570022077870051e-06,
"loss": 0.18200109899044037,
"memory(GiB)": 77.59,
"step": 46,
"token_acc": 0.9708482572798199,
"train_speed(iter/s)": 0.002133
},
{
"epoch": 46.7710843373494,
"grad_norm": 1.172978940255451,
"learning_rate": 4.546562036716732e-06,
"loss": 0.15860611200332642,
"memory(GiB)": 77.59,
"step": 47,
"token_acc": 0.9780445141443995,
"train_speed(iter/s)": 0.002133
},
{
"epoch": 47.7710843373494,
"grad_norm": 1.2360197347924713,
"learning_rate": 4.522542485937369e-06,
"loss": 0.14376184344291687,
"memory(GiB)": 77.59,
"step": 48,
"token_acc": 0.9766241102700259,
"train_speed(iter/s)": 0.002135
},
{
"epoch": 48.7710843373494,
"grad_norm": 0.6385686989268172,
"learning_rate": 4.497969992237312e-06,
"loss": 0.13206440210342407,
"memory(GiB)": 77.59,
"step": 49,
"token_acc": 0.9818039023693685,
"train_speed(iter/s)": 0.002135
},
{
"epoch": 49.7710843373494,
"grad_norm": 1.2901120485113573,
"learning_rate": 4.472851273490985e-06,
"loss": 0.11426319181919098,
"memory(GiB)": 77.59,
"step": 50,
"token_acc": 0.9836425932423786,
"train_speed(iter/s)": 0.002135
},
{
"epoch": 50.7710843373494,
"grad_norm": 1.2068228907293697,
"learning_rate": 4.4471931969052816e-06,
"loss": 0.09582371264696121,
"memory(GiB)": 77.59,
"step": 51,
"token_acc": 0.9849937986991469,
"train_speed(iter/s)": 0.00213
},
{
"epoch": 51.7710843373494,
"grad_norm": 1.1813520357475888,
"learning_rate": 4.421002777142148e-06,
"loss": 0.09163334965705872,
"memory(GiB)": 77.59,
"step": 52,
"token_acc": 0.985701349843115,
"train_speed(iter/s)": 0.00213
},
{
"epoch": 52.7710843373494,
"grad_norm": 1.1122597460598151,
"learning_rate": 4.394287174400838e-06,
"loss": 0.08439977467060089,
"memory(GiB)": 77.59,
"step": 53,
"token_acc": 0.9878925052136355,
"train_speed(iter/s)": 0.00213
},
{
"epoch": 53.7710843373494,
"grad_norm": 1.1253515530343927,
"learning_rate": 4.3670536924603855e-06,
"loss": 0.07314425706863403,
"memory(GiB)": 77.59,
"step": 54,
"token_acc": 0.9900777579782926,
"train_speed(iter/s)": 0.00213
},
{
"epoch": 54.7710843373494,
"grad_norm": 1.1033670330706447,
"learning_rate": 4.33930977668283e-06,
"loss": 0.06532438099384308,
"memory(GiB)": 77.59,
"step": 55,
"token_acc": 0.9898691656771185,
"train_speed(iter/s)": 0.00213
},
{
"epoch": 55.7710843373494,
"grad_norm": 1.1439476751450148,
"learning_rate": 4.311063011977723e-06,
"loss": 0.06318923085927963,
"memory(GiB)": 77.59,
"step": 56,
"token_acc": 0.9903093019267052,
"train_speed(iter/s)": 0.002131
},
{
"epoch": 56.7710843373494,
"grad_norm": 1.1421545710607997,
"learning_rate": 4.282321120728493e-06,
"loss": 0.05167176201939583,
"memory(GiB)": 77.59,
"step": 57,
"token_acc": 0.991860976682798,
"train_speed(iter/s)": 0.002131
},
{
"epoch": 57.7710843373494,
"grad_norm": 1.1132394753881205,
"learning_rate": 4.253091960681222e-06,
"loss": 0.05162680149078369,
"memory(GiB)": 77.59,
"step": 58,
"token_acc": 0.992666204277479,
"train_speed(iter/s)": 0.002132
},
{
"epoch": 58.7710843373494,
"grad_norm": 1.1383077361483038,
"learning_rate": 4.2233835227964145e-06,
"loss": 0.044882968068122864,
"memory(GiB)": 77.59,
"step": 59,
"token_acc": 0.9941307654265692,
"train_speed(iter/s)": 0.002132
},
{
"epoch": 59.7710843373494,
"grad_norm": 0.9459729284657843,
"learning_rate": 4.1932039290643534e-06,
"loss": 0.042324937880039215,
"memory(GiB)": 77.59,
"step": 60,
"token_acc": 0.9941203679909544,
"train_speed(iter/s)": 0.002132
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 200,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 268227602284544.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}