{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 72,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013888888888888888,
"grad_norm": 32.20025306755104,
"learning_rate": 0.0,
"loss": 1.6489,
"step": 1
},
{
"epoch": 0.027777777777777776,
"grad_norm": 31.757967894129138,
"learning_rate": 4.5454545454545457e-07,
"loss": 1.5608,
"step": 2
},
{
"epoch": 0.041666666666666664,
"grad_norm": 27.005592965716428,
"learning_rate": 9.090909090909091e-07,
"loss": 1.4318,
"step": 3
},
{
"epoch": 0.05555555555555555,
"grad_norm": 25.876596085278493,
"learning_rate": 1.3636363636363636e-06,
"loss": 1.3459,
"step": 4
},
{
"epoch": 0.06944444444444445,
"grad_norm": 20.05325433673777,
"learning_rate": 1.8181818181818183e-06,
"loss": 1.1116,
"step": 5
},
{
"epoch": 0.08333333333333333,
"grad_norm": 17.2745705787659,
"learning_rate": 2.2727272727272728e-06,
"loss": 1.0695,
"step": 6
},
{
"epoch": 0.09722222222222222,
"grad_norm": 11.547654869877704,
"learning_rate": 2.7272727272727272e-06,
"loss": 1.052,
"step": 7
},
{
"epoch": 0.1111111111111111,
"grad_norm": 7.017914033099854,
"learning_rate": 3.181818181818182e-06,
"loss": 1.0261,
"step": 8
},
{
"epoch": 0.125,
"grad_norm": 6.79190992209115,
"learning_rate": 3.6363636363636366e-06,
"loss": 0.9326,
"step": 9
},
{
"epoch": 0.1388888888888889,
"grad_norm": 6.623522507160447,
"learning_rate": 4.0909090909090915e-06,
"loss": 1.012,
"step": 10
},
{
"epoch": 0.1527777777777778,
"grad_norm": 3.7533541005048554,
"learning_rate": 4.5454545454545455e-06,
"loss": 0.809,
"step": 11
},
{
"epoch": 0.16666666666666666,
"grad_norm": 3.843755827537282,
"learning_rate": 5e-06,
"loss": 0.8047,
"step": 12
},
{
"epoch": 0.18055555555555555,
"grad_norm": 4.34940165800091,
"learning_rate": 5.4545454545454545e-06,
"loss": 0.7493,
"step": 13
},
{
"epoch": 0.19444444444444445,
"grad_norm": 2.6403288766736477,
"learning_rate": 5.90909090909091e-06,
"loss": 0.4881,
"step": 14
},
{
"epoch": 0.20833333333333334,
"grad_norm": 2.1536362938927005,
"learning_rate": 6.363636363636364e-06,
"loss": 0.5604,
"step": 15
},
{
"epoch": 0.2222222222222222,
"grad_norm": 2.943068061086033,
"learning_rate": 6.818181818181818e-06,
"loss": 0.6005,
"step": 16
},
{
"epoch": 0.2361111111111111,
"grad_norm": 2.4040059583348214,
"learning_rate": 7.272727272727273e-06,
"loss": 0.4677,
"step": 17
},
{
"epoch": 0.25,
"grad_norm": 2.373878455942415,
"learning_rate": 7.727272727272727e-06,
"loss": 0.5283,
"step": 18
},
{
"epoch": 0.2638888888888889,
"grad_norm": 2.5632160548134713,
"learning_rate": 8.181818181818183e-06,
"loss": 0.4489,
"step": 19
},
{
"epoch": 0.2777777777777778,
"grad_norm": 2.5874585166377924,
"learning_rate": 8.636363636363637e-06,
"loss": 0.6012,
"step": 20
},
{
"epoch": 0.2916666666666667,
"grad_norm": 2.347060535429832,
"learning_rate": 9.090909090909091e-06,
"loss": 0.4817,
"step": 21
},
{
"epoch": 0.3055555555555556,
"grad_norm": 3.103950712452586,
"learning_rate": 9.545454545454547e-06,
"loss": 0.5688,
"step": 22
},
{
"epoch": 0.3194444444444444,
"grad_norm": 2.7722433588291966,
"learning_rate": 1e-05,
"loss": 0.494,
"step": 23
},
{
"epoch": 0.3333333333333333,
"grad_norm": 2.118018603780166,
"learning_rate": 9.999344418328161e-06,
"loss": 0.3882,
"step": 24
},
{
"epoch": 0.3472222222222222,
"grad_norm": 2.227514369635361,
"learning_rate": 9.997377845227577e-06,
"loss": 0.451,
"step": 25
},
{
"epoch": 0.3611111111111111,
"grad_norm": 1.8352056819401141,
"learning_rate": 9.994100796397954e-06,
"loss": 0.4151,
"step": 26
},
{
"epoch": 0.375,
"grad_norm": 2.0303315554358488,
"learning_rate": 9.98951413118856e-06,
"loss": 0.383,
"step": 27
},
{
"epoch": 0.3888888888888889,
"grad_norm": 1.9876736681198277,
"learning_rate": 9.983619052372847e-06,
"loss": 0.3647,
"step": 28
},
{
"epoch": 0.4027777777777778,
"grad_norm": 1.7843457351766752,
"learning_rate": 9.97641710583307e-06,
"loss": 0.3629,
"step": 29
},
{
"epoch": 0.4166666666666667,
"grad_norm": 1.4930651496160667,
"learning_rate": 9.96791018015489e-06,
"loss": 0.3443,
"step": 30
},
{
"epoch": 0.4305555555555556,
"grad_norm": 1.5759590184712353,
"learning_rate": 9.958100506132127e-06,
"loss": 0.324,
"step": 31
},
{
"epoch": 0.4444444444444444,
"grad_norm": 1.705745384345177,
"learning_rate": 9.946990656181782e-06,
"loss": 0.3095,
"step": 32
},
{
"epoch": 0.4583333333333333,
"grad_norm": 2.0524444353062403,
"learning_rate": 9.934583543669454e-06,
"loss": 0.4268,
"step": 33
},
{
"epoch": 0.4722222222222222,
"grad_norm": 1.6876043891458643,
"learning_rate": 9.920882422145372e-06,
"loss": 0.4535,
"step": 34
},
{
"epoch": 0.4861111111111111,
"grad_norm": 1.7064855964835324,
"learning_rate": 9.905890884491196e-06,
"loss": 0.3926,
"step": 35
},
{
"epoch": 0.5,
"grad_norm": 1.823753890056008,
"learning_rate": 9.889612861977855e-06,
"loss": 0.4343,
"step": 36
},
{
"epoch": 0.5138888888888888,
"grad_norm": 1.8529840016274717,
"learning_rate": 9.872052623234632e-06,
"loss": 0.4281,
"step": 37
},
{
"epoch": 0.5277777777777778,
"grad_norm": 1.955408547526647,
"learning_rate": 9.853214773129796e-06,
"loss": 0.4148,
"step": 38
},
{
"epoch": 0.5416666666666666,
"grad_norm": 1.6681385782954192,
"learning_rate": 9.833104251563058e-06,
"loss": 0.3554,
"step": 39
},
{
"epoch": 0.5555555555555556,
"grad_norm": 1.7455115554623304,
"learning_rate": 9.811726332170153e-06,
"loss": 0.3806,
"step": 40
},
{
"epoch": 0.5694444444444444,
"grad_norm": 1.808587406097976,
"learning_rate": 9.789086620939936e-06,
"loss": 0.318,
"step": 41
},
{
"epoch": 0.5833333333333334,
"grad_norm": 2.0415709012615637,
"learning_rate": 9.765191054744305e-06,
"loss": 0.403,
"step": 42
},
{
"epoch": 0.5972222222222222,
"grad_norm": 1.5062509351045965,
"learning_rate": 9.740045899781353e-06,
"loss": 0.3219,
"step": 43
},
{
"epoch": 0.6111111111111112,
"grad_norm": 1.8233835485592458,
"learning_rate": 9.713657749932172e-06,
"loss": 0.3496,
"step": 44
},
{
"epoch": 0.625,
"grad_norm": 1.55248703557286,
"learning_rate": 9.68603352503172e-06,
"loss": 0.3158,
"step": 45
},
{
"epoch": 0.6388888888888888,
"grad_norm": 1.2775572351579332,
"learning_rate": 9.657180469054213e-06,
"loss": 0.3026,
"step": 46
},
{
"epoch": 0.6527777777777778,
"grad_norm": 1.6282146735823924,
"learning_rate": 9.627106148213521e-06,
"loss": 0.2771,
"step": 47
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.9376858537829651,
"learning_rate": 9.595818448979061e-06,
"loss": 0.2237,
"step": 48
},
{
"epoch": 0.6805555555555556,
"grad_norm": 1.6898368689684822,
"learning_rate": 9.563325576007702e-06,
"loss": 0.3631,
"step": 49
},
{
"epoch": 0.6944444444444444,
"grad_norm": 1.614524493713582,
"learning_rate": 9.529636049992235e-06,
"loss": 0.2762,
"step": 50
},
{
"epoch": 0.7083333333333334,
"grad_norm": 1.4097198478742305,
"learning_rate": 9.494758705426978e-06,
"loss": 0.2929,
"step": 51
},
{
"epoch": 0.7222222222222222,
"grad_norm": 1.2338452097484094,
"learning_rate": 9.458702688291072e-06,
"loss": 0.2741,
"step": 52
},
{
"epoch": 0.7361111111111112,
"grad_norm": 1.5056629548281208,
"learning_rate": 9.421477453650118e-06,
"loss": 0.2946,
"step": 53
},
{
"epoch": 0.75,
"grad_norm": 1.597626165346704,
"learning_rate": 9.38309276317674e-06,
"loss": 0.3472,
"step": 54
},
{
"epoch": 0.7638888888888888,
"grad_norm": 1.2539185771603638,
"learning_rate": 9.343558682590757e-06,
"loss": 0.3247,
"step": 55
},
{
"epoch": 0.7777777777777778,
"grad_norm": 1.5073835531227189,
"learning_rate": 9.302885579019626e-06,
"loss": 0.3089,
"step": 56
},
{
"epoch": 0.7916666666666666,
"grad_norm": 1.1716436585494405,
"learning_rate": 9.261084118279846e-06,
"loss": 0.2243,
"step": 57
},
{
"epoch": 0.8055555555555556,
"grad_norm": 1.615562349308342,
"learning_rate": 9.218165262080024e-06,
"loss": 0.3813,
"step": 58
},
{
"epoch": 0.8194444444444444,
"grad_norm": 1.126432528226203,
"learning_rate": 9.174140265146356e-06,
"loss": 0.2131,
"step": 59
},
{
"epoch": 0.8333333333333334,
"grad_norm": 4.944982415856606,
"learning_rate": 9.129020672271283e-06,
"loss": 0.3533,
"step": 60
},
{
"epoch": 0.8472222222222222,
"grad_norm": 1.8627430033355397,
"learning_rate": 9.082818315286054e-06,
"loss": 0.2986,
"step": 61
},
{
"epoch": 0.8611111111111112,
"grad_norm": 1.247452599538563,
"learning_rate": 9.035545309958048e-06,
"loss": 0.309,
"step": 62
},
{
"epoch": 0.875,
"grad_norm": 1.4040277782456316,
"learning_rate": 8.987214052813605e-06,
"loss": 0.3614,
"step": 63
},
{
"epoch": 0.8888888888888888,
"grad_norm": 1.726105636424933,
"learning_rate": 8.937837217887273e-06,
"loss": 0.2789,
"step": 64
},
{
"epoch": 0.9027777777777778,
"grad_norm": 1.3818975343126196,
"learning_rate": 8.887427753398249e-06,
"loss": 0.295,
"step": 65
},
{
"epoch": 0.9166666666666666,
"grad_norm": 1.40890129019135,
"learning_rate": 8.83599887835493e-06,
"loss": 0.3319,
"step": 66
},
{
"epoch": 0.9305555555555556,
"grad_norm": 1.1840920751941846,
"learning_rate": 8.783564079088478e-06,
"loss": 0.2327,
"step": 67
},
{
"epoch": 0.9444444444444444,
"grad_norm": 1.6408315149496815,
"learning_rate": 8.730137105716231e-06,
"loss": 0.3666,
"step": 68
},
{
"epoch": 0.9583333333333334,
"grad_norm": 1.3727819885809827,
"learning_rate": 8.675731968536004e-06,
"loss": 0.2455,
"step": 69
},
{
"epoch": 0.9722222222222222,
"grad_norm": 1.2514474549155656,
"learning_rate": 8.620362934352109e-06,
"loss": 0.2722,
"step": 70
},
{
"epoch": 0.9861111111111112,
"grad_norm": 1.7164065718751929,
"learning_rate": 8.564044522734147e-06,
"loss": 0.382,
"step": 71
},
{
"epoch": 1.0,
"grad_norm": 1.471855648534107,
"learning_rate": 8.506791502209497e-06,
"loss": 0.2948,
"step": 72
}
],
"logging_steps": 1,
"max_steps": 216,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 27478008823808.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}