{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.3386243386243386,
"eval_steps": 500,
"global_step": 80,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016931216931216932,
"grad_norm": 2322.343994140625,
"learning_rate": 0.0,
"loss": 1.2269,
"step": 1
},
{
"epoch": 0.033862433862433865,
"grad_norm": 12.033977508544922,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.2059,
"step": 2
},
{
"epoch": 0.050793650793650794,
"grad_norm": 10.809624671936035,
"learning_rate": 6.666666666666667e-06,
"loss": 1.2198,
"step": 3
},
{
"epoch": 0.06772486772486773,
"grad_norm": 5.77828311920166,
"learning_rate": 1e-05,
"loss": 1.0599,
"step": 4
},
{
"epoch": 0.08465608465608465,
"grad_norm": 5.281270503997803,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.9754,
"step": 5
},
{
"epoch": 0.10158730158730159,
"grad_norm": 5.149123191833496,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.9476,
"step": 6
},
{
"epoch": 0.11851851851851852,
"grad_norm": 6.636379718780518,
"learning_rate": 2e-05,
"loss": 1.0585,
"step": 7
},
{
"epoch": 0.13544973544973546,
"grad_norm": 3.8481030464172363,
"learning_rate": 1.9998370105646414e-05,
"loss": 0.9413,
"step": 8
},
{
"epoch": 0.1523809523809524,
"grad_norm": 3.835693120956421,
"learning_rate": 1.999348095389677e-05,
"loss": 0.8891,
"step": 9
},
{
"epoch": 0.1693121693121693,
"grad_norm": 2.8596138954162598,
"learning_rate": 1.998533413851124e-05,
"loss": 0.8795,
"step": 10
},
{
"epoch": 0.18624338624338624,
"grad_norm": 2.8256287574768066,
"learning_rate": 1.9973932315179502e-05,
"loss": 0.8446,
"step": 11
},
{
"epoch": 0.20317460317460317,
"grad_norm": 5.52194356918335,
"learning_rate": 1.9959279200655044e-05,
"loss": 0.8027,
"step": 12
},
{
"epoch": 0.2201058201058201,
"grad_norm": 2.0485758781433105,
"learning_rate": 1.9941379571543597e-05,
"loss": 0.7859,
"step": 13
},
{
"epoch": 0.23703703703703705,
"grad_norm": 2.2072129249572754,
"learning_rate": 1.9920239262746045e-05,
"loss": 0.7766,
"step": 14
},
{
"epoch": 0.25396825396825395,
"grad_norm": 2.154557943344116,
"learning_rate": 1.9895865165556375e-05,
"loss": 0.7366,
"step": 15
},
{
"epoch": 0.2708994708994709,
"grad_norm": 1.9130762815475464,
"learning_rate": 1.9868265225415263e-05,
"loss": 0.7432,
"step": 16
},
{
"epoch": 0.2878306878306878,
"grad_norm": 1.6504809856414795,
"learning_rate": 1.9837448439320027e-05,
"loss": 0.6905,
"step": 17
},
{
"epoch": 0.3047619047619048,
"grad_norm": 1.7797411680221558,
"learning_rate": 1.9803424852891803e-05,
"loss": 0.6799,
"step": 18
},
{
"epoch": 0.3216931216931217,
"grad_norm": 1.7259820699691772,
"learning_rate": 1.976620555710087e-05,
"loss": 0.6706,
"step": 19
},
{
"epoch": 0.3386243386243386,
"grad_norm": 1.4618961811065674,
"learning_rate": 1.9725802684651235e-05,
"loss": 0.6687,
"step": 20
},
{
"epoch": 0.35555555555555557,
"grad_norm": 1.5048692226409912,
"learning_rate": 1.9682229406025635e-05,
"loss": 0.6762,
"step": 21
},
{
"epoch": 0.3724867724867725,
"grad_norm": 1.4504609107971191,
"learning_rate": 1.963549992519223e-05,
"loss": 0.6631,
"step": 22
},
{
"epoch": 0.38941798941798944,
"grad_norm": 1.4441351890563965,
"learning_rate": 1.9585629474974413e-05,
"loss": 0.6394,
"step": 23
},
{
"epoch": 0.40634920634920635,
"grad_norm": 1.2873355150222778,
"learning_rate": 1.953263431208523e-05,
"loss": 0.6279,
"step": 24
},
{
"epoch": 0.42328042328042326,
"grad_norm": 1.4676573276519775,
"learning_rate": 1.9476531711828027e-05,
"loss": 0.6238,
"step": 25
},
{
"epoch": 0.4402116402116402,
"grad_norm": 1.3487250804901123,
"learning_rate": 1.9417339962465084e-05,
"loss": 0.6253,
"step": 26
},
{
"epoch": 0.45714285714285713,
"grad_norm": 23.1207275390625,
"learning_rate": 1.935507835925601e-05,
"loss": 0.6166,
"step": 27
},
{
"epoch": 0.4740740740740741,
"grad_norm": 1.408158540725708,
"learning_rate": 1.9289767198167918e-05,
"loss": 0.5644,
"step": 28
},
{
"epoch": 0.491005291005291,
"grad_norm": 1.199977159500122,
"learning_rate": 1.9221427769259333e-05,
"loss": 0.5865,
"step": 29
},
{
"epoch": 0.5079365079365079,
"grad_norm": 1.1895148754119873,
"learning_rate": 1.9150082349740123e-05,
"loss": 0.5619,
"step": 30
},
{
"epoch": 0.5248677248677248,
"grad_norm": 1.1904869079589844,
"learning_rate": 1.9075754196709574e-05,
"loss": 0.5642,
"step": 31
},
{
"epoch": 0.5417989417989418,
"grad_norm": 1.4911994934082031,
"learning_rate": 1.899846753957507e-05,
"loss": 0.5684,
"step": 32
},
{
"epoch": 0.5587301587301587,
"grad_norm": 1.2807954549789429,
"learning_rate": 1.8918247572153822e-05,
"loss": 0.532,
"step": 33
},
{
"epoch": 0.5756613756613757,
"grad_norm": 1.1882894039154053,
"learning_rate": 1.883512044446023e-05,
"loss": 0.5527,
"step": 34
},
{
"epoch": 0.5925925925925926,
"grad_norm": 1.3212125301361084,
"learning_rate": 1.8749113254181498e-05,
"loss": 0.5621,
"step": 35
},
{
"epoch": 0.6095238095238096,
"grad_norm": 1.4091153144836426,
"learning_rate": 1.866025403784439e-05,
"loss": 0.5205,
"step": 36
},
{
"epoch": 0.6264550264550265,
"grad_norm": 1.1614974737167358,
"learning_rate": 1.8568571761675893e-05,
"loss": 0.5181,
"step": 37
},
{
"epoch": 0.6433862433862434,
"grad_norm": 1.0888781547546387,
"learning_rate": 1.8474096312160866e-05,
"loss": 0.5359,
"step": 38
},
{
"epoch": 0.6603174603174603,
"grad_norm": 1.303357720375061,
"learning_rate": 1.837685848629965e-05,
"loss": 0.5256,
"step": 39
},
{
"epoch": 0.6772486772486772,
"grad_norm": 1.1001622676849365,
"learning_rate": 1.827688998156891e-05,
"loss": 0.5277,
"step": 40
},
{
"epoch": 0.6941798941798942,
"grad_norm": 16.82403564453125,
"learning_rate": 1.817422338558892e-05,
"loss": 0.5301,
"step": 41
},
{
"epoch": 0.7111111111111111,
"grad_norm": 1.2562721967697144,
"learning_rate": 1.8068892165500704e-05,
"loss": 0.5019,
"step": 42
},
{
"epoch": 0.728042328042328,
"grad_norm": 1.143244981765747,
"learning_rate": 1.796093065705644e-05,
"loss": 0.5098,
"step": 43
},
{
"epoch": 0.744973544973545,
"grad_norm": 1.2346585988998413,
"learning_rate": 1.7850374053426725e-05,
"loss": 0.5079,
"step": 44
},
{
"epoch": 0.7619047619047619,
"grad_norm": 1.292527675628662,
"learning_rate": 1.7737258393728363e-05,
"loss": 0.4987,
"step": 45
},
{
"epoch": 0.7788359788359789,
"grad_norm": 1.0964988470077515,
"learning_rate": 1.7621620551276366e-05,
"loss": 0.52,
"step": 46
},
{
"epoch": 0.7957671957671958,
"grad_norm": 1.3093878030776978,
"learning_rate": 1.7503498221564026e-05,
"loss": 0.4911,
"step": 47
},
{
"epoch": 0.8126984126984127,
"grad_norm": 1.2538305521011353,
"learning_rate": 1.7382929909974988e-05,
"loss": 0.4805,
"step": 48
},
{
"epoch": 0.8296296296296296,
"grad_norm": 1.5297569036483765,
"learning_rate": 1.725995491923131e-05,
"loss": 0.4834,
"step": 49
},
{
"epoch": 0.8465608465608465,
"grad_norm": 1.126209020614624,
"learning_rate": 1.7134613336581602e-05,
"loss": 0.474,
"step": 50
},
{
"epoch": 0.8634920634920635,
"grad_norm": 1.2999428510665894,
"learning_rate": 1.7006946020733426e-05,
"loss": 0.4662,
"step": 51
},
{
"epoch": 0.8804232804232804,
"grad_norm": 3.900702714920044,
"learning_rate": 1.6876994588534234e-05,
"loss": 0.4728,
"step": 52
},
{
"epoch": 0.8973544973544973,
"grad_norm": 1.1961653232574463,
"learning_rate": 1.6744801401405138e-05,
"loss": 0.4638,
"step": 53
},
{
"epoch": 0.9142857142857143,
"grad_norm": 1.1492871046066284,
"learning_rate": 1.6610409551532006e-05,
"loss": 0.4608,
"step": 54
},
{
"epoch": 0.9312169312169312,
"grad_norm": 1.1923617124557495,
"learning_rate": 1.647386284781828e-05,
"loss": 0.4742,
"step": 55
},
{
"epoch": 0.9481481481481482,
"grad_norm": 1.0757859945297241,
"learning_rate": 1.6335205801604242e-05,
"loss": 0.4757,
"step": 56
},
{
"epoch": 0.9650793650793651,
"grad_norm": 1.0793986320495605,
"learning_rate": 1.6194483612157232e-05,
"loss": 0.4543,
"step": 57
},
{
"epoch": 0.982010582010582,
"grad_norm": 1.0309922695159912,
"learning_rate": 1.6051742151937655e-05,
"loss": 0.4637,
"step": 58
},
{
"epoch": 0.9989417989417989,
"grad_norm": 1.0178518295288086,
"learning_rate": 1.590702795164551e-05,
"loss": 0.4845,
"step": 59
},
{
"epoch": 1.0,
"grad_norm": 1.0178518295288086,
"learning_rate": 1.57603881850524e-05,
"loss": 0.4151,
"step": 60
},
{
"epoch": 1.016931216931217,
"grad_norm": 6.670537948608398,
"learning_rate": 1.5611870653623826e-05,
"loss": 0.4583,
"step": 61
},
{
"epoch": 1.0338624338624338,
"grad_norm": 1.1750141382217407,
"learning_rate": 1.546152377093697e-05,
"loss": 0.4378,
"step": 62
},
{
"epoch": 1.0507936507936508,
"grad_norm": 1.258583903312683,
"learning_rate": 1.530939654689887e-05,
"loss": 0.4698,
"step": 63
},
{
"epoch": 1.0677248677248676,
"grad_norm": 1.1863641738891602,
"learning_rate": 1.515553857177022e-05,
"loss": 0.4493,
"step": 64
},
{
"epoch": 1.0846560846560847,
"grad_norm": 1.2087653875350952,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.4495,
"step": 65
},
{
"epoch": 1.1015873015873017,
"grad_norm": 1.1390602588653564,
"learning_rate": 1.4842831533876196e-05,
"loss": 0.4628,
"step": 66
},
{
"epoch": 1.1185185185185185,
"grad_norm": 1.0523955821990967,
"learning_rate": 1.4684084406997903e-05,
"loss": 0.4444,
"step": 67
},
{
"epoch": 1.1354497354497355,
"grad_norm": 1.3320904970169067,
"learning_rate": 1.4523810367574271e-05,
"loss": 0.4487,
"step": 68
},
{
"epoch": 1.1523809523809523,
"grad_norm": 2.979928970336914,
"learning_rate": 1.4362061661555675e-05,
"loss": 0.4568,
"step": 69
},
{
"epoch": 1.1693121693121693,
"grad_norm": 1.2313185930252075,
"learning_rate": 1.4198891015602648e-05,
"loss": 0.4359,
"step": 70
},
{
"epoch": 1.1862433862433863,
"grad_norm": 1.1036418676376343,
"learning_rate": 1.4034351619898088e-05,
"loss": 0.4698,
"step": 71
},
{
"epoch": 1.2031746031746031,
"grad_norm": 1.1762608289718628,
"learning_rate": 1.3868497110808394e-05,
"loss": 0.4313,
"step": 72
},
{
"epoch": 1.2201058201058201,
"grad_norm": 1.033484697341919,
"learning_rate": 1.3701381553399147e-05,
"loss": 0.45,
"step": 73
},
{
"epoch": 1.237037037037037,
"grad_norm": 2.2186121940612793,
"learning_rate": 1.3533059423811026e-05,
"loss": 0.4345,
"step": 74
},
{
"epoch": 1.253968253968254,
"grad_norm": 1.0926917791366577,
"learning_rate": 1.3363585591501751e-05,
"loss": 0.4317,
"step": 75
},
{
"epoch": 1.270899470899471,
"grad_norm": 1.094223976135254,
"learning_rate": 1.31930153013598e-05,
"loss": 0.4274,
"step": 76
},
{
"epoch": 1.2878306878306878,
"grad_norm": 1.0197666883468628,
"learning_rate": 1.3021404155695728e-05,
"loss": 0.4465,
"step": 77
},
{
"epoch": 1.3047619047619048,
"grad_norm": 1.0113344192504883,
"learning_rate": 1.2848808096117003e-05,
"loss": 0.4256,
"step": 78
},
{
"epoch": 1.3216931216931216,
"grad_norm": 0.9916861653327942,
"learning_rate": 1.2675283385292212e-05,
"loss": 0.4291,
"step": 79
},
{
"epoch": 1.3386243386243386,
"grad_norm": 1.0909327268600464,
"learning_rate": 1.250088658861063e-05,
"loss": 0.4387,
"step": 80
}
],
"logging_steps": 1.0,
"max_steps": 180,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 5,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.636674904683184e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}