{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9991111111111111,
"eval_steps": 100,
"global_step": 562,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017777777777777778,
"grad_norm": 1.0419072440732986,
"learning_rate": 4.736842105263158e-07,
"loss": 0.2211,
"step": 10
},
{
"epoch": 0.035555555555555556,
"grad_norm": 0.7452424824963847,
"learning_rate": 1e-06,
"loss": 0.2157,
"step": 20
},
{
"epoch": 0.05333333333333334,
"grad_norm": 0.6931284527687698,
"learning_rate": 1.5263157894736844e-06,
"loss": 0.1966,
"step": 30
},
{
"epoch": 0.07111111111111111,
"grad_norm": 0.8280088130256422,
"learning_rate": 2.0526315789473687e-06,
"loss": 0.1673,
"step": 40
},
{
"epoch": 0.08888888888888889,
"grad_norm": 0.6287446128091657,
"learning_rate": 2.5789473684210527e-06,
"loss": 0.1401,
"step": 50
},
{
"epoch": 0.10666666666666667,
"grad_norm": 0.6232220640101711,
"learning_rate": 2.9998838998872806e-06,
"loss": 0.139,
"step": 60
},
{
"epoch": 0.12444444444444444,
"grad_norm": 0.5474054010608423,
"learning_rate": 2.9958222827235943e-06,
"loss": 0.1338,
"step": 70
},
{
"epoch": 0.14222222222222222,
"grad_norm": 0.5781997563253718,
"learning_rate": 2.9859736197372397e-06,
"loss": 0.1254,
"step": 80
},
{
"epoch": 0.16,
"grad_norm": 0.5531747787904319,
"learning_rate": 2.9703760134920394e-06,
"loss": 0.1188,
"step": 90
},
{
"epoch": 0.17777777777777778,
"grad_norm": 0.561221143893872,
"learning_rate": 2.9490898080964526e-06,
"loss": 0.1176,
"step": 100
},
{
"epoch": 0.17777777777777778,
"eval_loss": 0.12179438769817352,
"eval_runtime": 184.0616,
"eval_samples_per_second": 5.433,
"eval_steps_per_second": 1.358,
"step": 100
},
{
"epoch": 0.19555555555555557,
"grad_norm": 0.6020618516419041,
"learning_rate": 2.9221973557439444e-06,
"loss": 0.1175,
"step": 110
},
{
"epoch": 0.21333333333333335,
"grad_norm": 0.5741224360735192,
"learning_rate": 2.8898026981083445e-06,
"loss": 0.1171,
"step": 120
},
{
"epoch": 0.2311111111111111,
"grad_norm": 0.6274532992299141,
"learning_rate": 2.8520311638268266e-06,
"loss": 0.1253,
"step": 130
},
{
"epoch": 0.24888888888888888,
"grad_norm": 0.49781263753856264,
"learning_rate": 2.809028883627756e-06,
"loss": 0.1187,
"step": 140
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.5549229993939675,
"learning_rate": 2.7609622249792905e-06,
"loss": 0.1141,
"step": 150
},
{
"epoch": 0.28444444444444444,
"grad_norm": 0.661411990363366,
"learning_rate": 2.7080171484459668e-06,
"loss": 0.119,
"step": 160
},
{
"epoch": 0.3022222222222222,
"grad_norm": 0.6107363356245499,
"learning_rate": 2.6503984882434005e-06,
"loss": 0.1181,
"step": 170
},
{
"epoch": 0.32,
"grad_norm": 0.573957678222765,
"learning_rate": 2.5883291597745014e-06,
"loss": 0.1187,
"step": 180
},
{
"epoch": 0.3377777777777778,
"grad_norm": 0.6138727972165942,
"learning_rate": 2.5220492972130806e-06,
"loss": 0.1197,
"step": 190
},
{
"epoch": 0.35555555555555557,
"grad_norm": 0.5948385479167928,
"learning_rate": 2.451815324471386e-06,
"loss": 0.1218,
"step": 200
},
{
"epoch": 0.35555555555555557,
"eval_loss": 0.11735764890909195,
"eval_runtime": 183.9826,
"eval_samples_per_second": 5.435,
"eval_steps_per_second": 1.359,
"step": 200
},
{
"epoch": 0.37333333333333335,
"grad_norm": 0.6295964285383479,
"learning_rate": 2.3778989631457997e-06,
"loss": 0.1194,
"step": 210
},
{
"epoch": 0.39111111111111113,
"grad_norm": 0.6560232772550603,
"learning_rate": 2.3005861812787647e-06,
"loss": 0.1207,
"step": 220
},
{
"epoch": 0.4088888888888889,
"grad_norm": 0.6047394379817188,
"learning_rate": 2.2201760870039892e-06,
"loss": 0.1209,
"step": 230
},
{
"epoch": 0.4266666666666667,
"grad_norm": 0.6505634589992538,
"learning_rate": 2.136979771355179e-06,
"loss": 0.1192,
"step": 240
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.5883524047074119,
"learning_rate": 2.0513191047152655e-06,
"loss": 0.1179,
"step": 250
},
{
"epoch": 0.4622222222222222,
"grad_norm": 0.6038782058032554,
"learning_rate": 1.963525491562421e-06,
"loss": 0.1136,
"step": 260
},
{
"epoch": 0.48,
"grad_norm": 0.5577708881312057,
"learning_rate": 1.873938588330516e-06,
"loss": 0.1193,
"step": 270
},
{
"epoch": 0.49777777777777776,
"grad_norm": 0.6115359231627819,
"learning_rate": 1.782904989344358e-06,
"loss": 0.1175,
"step": 280
},
{
"epoch": 0.5155555555555555,
"grad_norm": 0.5410746194499723,
"learning_rate": 1.6907768859135836e-06,
"loss": 0.1162,
"step": 290
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.4542682496528684,
"learning_rate": 1.5979107037728993e-06,
"loss": 0.1198,
"step": 300
},
{
"epoch": 0.5333333333333333,
"eval_loss": 0.11582176387310028,
"eval_runtime": 183.9264,
"eval_samples_per_second": 5.437,
"eval_steps_per_second": 1.359,
"step": 300
},
{
"epoch": 0.5511111111111111,
"grad_norm": 0.5825687136887242,
"learning_rate": 1.504665724140154e-06,
"loss": 0.1152,
"step": 310
},
{
"epoch": 0.5688888888888889,
"grad_norm": 0.6078813327746706,
"learning_rate": 1.4114026937271082e-06,
"loss": 0.1137,
"step": 320
},
{
"epoch": 0.5866666666666667,
"grad_norm": 0.6001130542293966,
"learning_rate": 1.3184824290804824e-06,
"loss": 0.1108,
"step": 330
},
{
"epoch": 0.6044444444444445,
"grad_norm": 0.635870028578828,
"learning_rate": 1.2262644206528362e-06,
"loss": 0.1152,
"step": 340
},
{
"epoch": 0.6222222222222222,
"grad_norm": 0.6138033357625291,
"learning_rate": 1.1351054420038428e-06,
"loss": 0.1134,
"step": 350
},
{
"epoch": 0.64,
"grad_norm": 0.5947837720559314,
"learning_rate": 1.0453581695126965e-06,
"loss": 0.1088,
"step": 360
},
{
"epoch": 0.6577777777777778,
"grad_norm": 0.6264430952043478,
"learning_rate": 9.573698179417152e-07,
"loss": 0.1125,
"step": 370
},
{
"epoch": 0.6755555555555556,
"grad_norm": 0.6176320518913175,
"learning_rate": 8.71480797129877e-07,
"loss": 0.1182,
"step": 380
},
{
"epoch": 0.6933333333333334,
"grad_norm": 0.513024178056074,
"learning_rate": 7.880233950132851e-07,
"loss": 0.1189,
"step": 390
},
{
"epoch": 0.7111111111111111,
"grad_norm": 0.6350615284149549,
"learning_rate": 7.073204920676936e-07,
"loss": 0.1137,
"step": 400
},
{
"epoch": 0.7111111111111111,
"eval_loss": 0.11508546024560928,
"eval_runtime": 183.9493,
"eval_samples_per_second": 5.436,
"eval_steps_per_second": 1.359,
"step": 400
},
{
"epoch": 0.7288888888888889,
"grad_norm": 0.48363541573795593,
"learning_rate": 6.296843121466727e-07,
"loss": 0.1127,
"step": 410
},
{
"epoch": 0.7466666666666667,
"grad_norm": 0.5643378335194024,
"learning_rate": 5.554152145481741e-07,
"loss": 0.1174,
"step": 420
},
{
"epoch": 0.7644444444444445,
"grad_norm": 0.6468419521537324,
"learning_rate": 4.848005319827576e-07,
"loss": 0.1141,
"step": 430
},
{
"epoch": 0.7822222222222223,
"grad_norm": 0.46487157780661714,
"learning_rate": 4.181134589391536e-07,
"loss": 0.115,
"step": 440
},
{
"epoch": 0.8,
"grad_norm": 0.5754274431734688,
"learning_rate": 3.556119947478568e-07,
"loss": 0.1126,
"step": 450
},
{
"epoch": 0.8177777777777778,
"grad_norm": 0.6263443068823263,
"learning_rate": 2.975379454318402e-07,
"loss": 0.1176,
"step": 460
},
{
"epoch": 0.8355555555555556,
"grad_norm": 0.6218792137543429,
"learning_rate": 2.441159882060252e-07,
"loss": 0.1127,
"step": 470
},
{
"epoch": 0.8533333333333334,
"grad_norm": 0.5555853733643114,
"learning_rate": 1.9555280224478466e-07,
"loss": 0.1151,
"step": 480
},
{
"epoch": 0.8711111111111111,
"grad_norm": 0.5739728343376987,
"learning_rate": 1.520362690803691e-07,
"loss": 0.115,
"step": 490
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.6360741808789474,
"learning_rate": 1.1373474572575687e-07,
"loss": 0.1123,
"step": 500
},
{
"epoch": 0.8888888888888888,
"eval_loss": 0.11468039453029633,
"eval_runtime": 184.0621,
"eval_samples_per_second": 5.433,
"eval_steps_per_second": 1.358,
"step": 500
},
{
"epoch": 0.9066666666666666,
"grad_norm": 0.5688753227202804,
"learning_rate": 8.079641333407817e-08,
"loss": 0.1143,
"step": 510
},
{
"epoch": 0.9244444444444444,
"grad_norm": 0.5733667194972327,
"learning_rate": 5.3348703914518846e-08,
"loss": 0.1113,
"step": 520
},
{
"epoch": 0.9422222222222222,
"grad_norm": 0.573336141508557,
"learning_rate": 3.149780732263402e-08,
"loss": 0.1117,
"step": 530
},
{
"epoch": 0.96,
"grad_norm": 0.5491519278016026,
"learning_rate": 1.5328260432425355e-08,
"loss": 0.1149,
"step": 540
},
{
"epoch": 0.9777777777777777,
"grad_norm": 0.6502017981844215,
"learning_rate": 4.902620079598741e-09,
"loss": 0.1155,
"step": 550
},
{
"epoch": 0.9955555555555555,
"grad_norm": 0.5247241217236942,
"learning_rate": 2.612210413192595e-10,
"loss": 0.1105,
"step": 560
},
{
"epoch": 0.9991111111111111,
"step": 562,
"total_flos": 35140758945792.0,
"train_loss": 0.12346158789145989,
"train_runtime": 5252.3327,
"train_samples_per_second": 1.714,
"train_steps_per_second": 0.107
}
],
"logging_steps": 10,
"max_steps": 562,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 35140758945792.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}