{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.663900414937759,
"eval_steps": 500,
"global_step": 110,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03319502074688797,
"grad_norm": 4.213273525238037,
"learning_rate": 0.0,
"loss": 1.2016,
"step": 1
},
{
"epoch": 0.06639004149377593,
"grad_norm": 4.245621204376221,
"learning_rate": 2.666666666666667e-06,
"loss": 1.231,
"step": 2
},
{
"epoch": 0.0995850622406639,
"grad_norm": 3.9789071083068848,
"learning_rate": 5.333333333333334e-06,
"loss": 1.2195,
"step": 3
},
{
"epoch": 0.13278008298755187,
"grad_norm": 2.037877082824707,
"learning_rate": 8.000000000000001e-06,
"loss": 1.1222,
"step": 4
},
{
"epoch": 0.16597510373443983,
"grad_norm": 1.7719324827194214,
"learning_rate": 1.0666666666666667e-05,
"loss": 1.0122,
"step": 5
},
{
"epoch": 0.1991701244813278,
"grad_norm": 1.4349164962768555,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.0204,
"step": 6
},
{
"epoch": 0.23236514522821577,
"grad_norm": 1.9872256517410278,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.9755,
"step": 7
},
{
"epoch": 0.26556016597510373,
"grad_norm": 1.5722675323486328,
"learning_rate": 1.866666666666667e-05,
"loss": 0.9255,
"step": 8
},
{
"epoch": 0.2987551867219917,
"grad_norm": 1.8592041730880737,
"learning_rate": 2.1333333333333335e-05,
"loss": 0.915,
"step": 9
},
{
"epoch": 0.33195020746887965,
"grad_norm": 1.2490234375,
"learning_rate": 2.4e-05,
"loss": 0.8887,
"step": 10
},
{
"epoch": 0.3651452282157676,
"grad_norm": 1.2043609619140625,
"learning_rate": 2.6666666666666667e-05,
"loss": 0.8753,
"step": 11
},
{
"epoch": 0.3983402489626556,
"grad_norm": 1.0329132080078125,
"learning_rate": 2.9333333333333333e-05,
"loss": 0.8286,
"step": 12
},
{
"epoch": 0.4315352697095436,
"grad_norm": 0.9864183068275452,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.8324,
"step": 13
},
{
"epoch": 0.46473029045643155,
"grad_norm": 0.8905398845672607,
"learning_rate": 3.466666666666667e-05,
"loss": 0.8275,
"step": 14
},
{
"epoch": 0.4979253112033195,
"grad_norm": 0.8707190752029419,
"learning_rate": 3.733333333333334e-05,
"loss": 0.8337,
"step": 15
},
{
"epoch": 0.5311203319502075,
"grad_norm": 0.7187903523445129,
"learning_rate": 4e-05,
"loss": 0.8335,
"step": 16
},
{
"epoch": 0.5643153526970954,
"grad_norm": 0.8947873115539551,
"learning_rate": 3.999458482358924e-05,
"loss": 0.8081,
"step": 17
},
{
"epoch": 0.5975103734439834,
"grad_norm": 0.7211642265319824,
"learning_rate": 3.99783422267705e-05,
"loss": 0.8138,
"step": 18
},
{
"epoch": 0.6307053941908713,
"grad_norm": 0.7200644016265869,
"learning_rate": 3.9951281005196486e-05,
"loss": 0.7912,
"step": 19
},
{
"epoch": 0.6639004149377593,
"grad_norm": 0.630005955696106,
"learning_rate": 3.991341581299609e-05,
"loss": 0.7977,
"step": 20
},
{
"epoch": 0.6970954356846473,
"grad_norm": 0.5824337005615234,
"learning_rate": 3.9864767154838864e-05,
"loss": 0.7702,
"step": 21
},
{
"epoch": 0.7302904564315352,
"grad_norm": 0.6924874186515808,
"learning_rate": 3.980536137483141e-05,
"loss": 0.7815,
"step": 22
},
{
"epoch": 0.7634854771784232,
"grad_norm": 0.5716642141342163,
"learning_rate": 3.973523064225159e-05,
"loss": 0.7663,
"step": 23
},
{
"epoch": 0.7966804979253111,
"grad_norm": 0.607197642326355,
"learning_rate": 3.965441293412827e-05,
"loss": 0.7863,
"step": 24
},
{
"epoch": 0.8298755186721992,
"grad_norm": 0.6522372961044312,
"learning_rate": 3.9562952014676116e-05,
"loss": 0.7684,
"step": 25
},
{
"epoch": 0.8630705394190872,
"grad_norm": 0.47301605343818665,
"learning_rate": 3.946089741159648e-05,
"loss": 0.7687,
"step": 26
},
{
"epoch": 0.8962655601659751,
"grad_norm": 0.6211557984352112,
"learning_rate": 3.934830438925728e-05,
"loss": 0.7541,
"step": 27
},
{
"epoch": 0.9294605809128631,
"grad_norm": 0.4624919295310974,
"learning_rate": 3.922523391876638e-05,
"loss": 0.7383,
"step": 28
},
{
"epoch": 0.9626556016597511,
"grad_norm": 0.45500412583351135,
"learning_rate": 3.909175264495464e-05,
"loss": 0.7515,
"step": 29
},
{
"epoch": 0.995850622406639,
"grad_norm": 0.43937766551971436,
"learning_rate": 3.8947932850286585e-05,
"loss": 0.7396,
"step": 30
},
{
"epoch": 1.033195020746888,
"grad_norm": 1.3270426988601685,
"learning_rate": 3.879385241571817e-05,
"loss": 1.4314,
"step": 31
},
{
"epoch": 1.066390041493776,
"grad_norm": 0.6129482388496399,
"learning_rate": 3.862959477852285e-05,
"loss": 0.7111,
"step": 32
},
{
"epoch": 1.099585062240664,
"grad_norm": 0.5896479487419128,
"learning_rate": 3.845524888710885e-05,
"loss": 0.7171,
"step": 33
},
{
"epoch": 1.1327800829875518,
"grad_norm": 0.5441771745681763,
"learning_rate": 3.827090915285202e-05,
"loss": 0.6981,
"step": 34
},
{
"epoch": 1.16597510373444,
"grad_norm": 0.5450417995452881,
"learning_rate": 3.807667539897041e-05,
"loss": 0.7072,
"step": 35
},
{
"epoch": 1.1991701244813278,
"grad_norm": 0.5991458892822266,
"learning_rate": 3.787265280646825e-05,
"loss": 0.7033,
"step": 36
},
{
"epoch": 1.2323651452282158,
"grad_norm": 0.4586258828639984,
"learning_rate": 3.7658951857178544e-05,
"loss": 0.7083,
"step": 37
},
{
"epoch": 1.2655601659751037,
"grad_norm": 0.5916818976402283,
"learning_rate": 3.743568827393525e-05,
"loss": 0.6842,
"step": 38
},
{
"epoch": 1.2987551867219918,
"grad_norm": 0.5111647844314575,
"learning_rate": 3.720298295790732e-05,
"loss": 0.6761,
"step": 39
},
{
"epoch": 1.3319502074688796,
"grad_norm": 0.537373960018158,
"learning_rate": 3.696096192312852e-05,
"loss": 0.6751,
"step": 40
},
{
"epoch": 1.3651452282157677,
"grad_norm": 0.490434467792511,
"learning_rate": 3.6709756228258735e-05,
"loss": 0.6889,
"step": 41
},
{
"epoch": 1.3983402489626555,
"grad_norm": 0.5154665112495422,
"learning_rate": 3.644950190561325e-05,
"loss": 0.6965,
"step": 42
},
{
"epoch": 1.4315352697095436,
"grad_norm": 0.42567506432533264,
"learning_rate": 3.6180339887498953e-05,
"loss": 0.693,
"step": 43
},
{
"epoch": 1.4647302904564317,
"grad_norm": 0.4047231674194336,
"learning_rate": 3.590241592989696e-05,
"loss": 0.6889,
"step": 44
},
{
"epoch": 1.4979253112033195,
"grad_norm": 0.46269938349723816,
"learning_rate": 3.561588053353319e-05,
"loss": 0.6854,
"step": 45
},
{
"epoch": 1.5311203319502074,
"grad_norm": 0.4641977846622467,
"learning_rate": 3.532088886237956e-05,
"loss": 0.6946,
"step": 46
},
{
"epoch": 1.5643153526970954,
"grad_norm": 0.39332008361816406,
"learning_rate": 3.5017600659629986e-05,
"loss": 0.6835,
"step": 47
},
{
"epoch": 1.5975103734439835,
"grad_norm": 0.44407254457473755,
"learning_rate": 3.470618016119658e-05,
"loss": 0.692,
"step": 48
},
{
"epoch": 1.6307053941908713,
"grad_norm": 0.38414013385772705,
"learning_rate": 3.438679600677303e-05,
"loss": 0.6775,
"step": 49
},
{
"epoch": 1.6639004149377592,
"grad_norm": 0.4078037440776825,
"learning_rate": 3.405962114851324e-05,
"loss": 0.6685,
"step": 50
},
{
"epoch": 1.6970954356846473,
"grad_norm": 0.43119096755981445,
"learning_rate": 3.372483275737468e-05,
"loss": 0.6581,
"step": 51
},
{
"epoch": 1.7302904564315353,
"grad_norm": 0.39673659205436707,
"learning_rate": 3.3382612127177166e-05,
"loss": 0.6748,
"step": 52
},
{
"epoch": 1.7634854771784232,
"grad_norm": 0.36500445008277893,
"learning_rate": 3.303314457642911e-05,
"loss": 0.6556,
"step": 53
},
{
"epoch": 1.796680497925311,
"grad_norm": 0.3905717134475708,
"learning_rate": 3.26766193479742e-05,
"loss": 0.67,
"step": 54
},
{
"epoch": 1.8298755186721993,
"grad_norm": 0.38068848848342896,
"learning_rate": 3.2313229506513167e-05,
"loss": 0.6526,
"step": 55
},
{
"epoch": 1.8630705394190872,
"grad_norm": 0.41845059394836426,
"learning_rate": 3.194317183405573e-05,
"loss": 0.6584,
"step": 56
},
{
"epoch": 1.896265560165975,
"grad_norm": 0.3980270028114319,
"learning_rate": 3.156664672335973e-05,
"loss": 0.6706,
"step": 57
},
{
"epoch": 1.929460580912863,
"grad_norm": 0.4714384377002716,
"learning_rate": 3.1183858069414936e-05,
"loss": 0.6658,
"step": 58
},
{
"epoch": 1.9626556016597512,
"grad_norm": 0.40001410245895386,
"learning_rate": 3.079501315903026e-05,
"loss": 0.6733,
"step": 59
},
{
"epoch": 1.995850622406639,
"grad_norm": 0.40499526262283325,
"learning_rate": 3.0400322558584308e-05,
"loss": 0.6747,
"step": 60
},
{
"epoch": 2.033195020746888,
"grad_norm": 1.1713193655014038,
"learning_rate": 3.0000000000000004e-05,
"loss": 1.2717,
"step": 61
},
{
"epoch": 2.066390041493776,
"grad_norm": 0.429723858833313,
"learning_rate": 2.959426226500493e-05,
"loss": 0.6141,
"step": 62
},
{
"epoch": 2.099585062240664,
"grad_norm": 0.5729584693908691,
"learning_rate": 2.9183329067740235e-05,
"loss": 0.6041,
"step": 63
},
{
"epoch": 2.132780082987552,
"grad_norm": 0.4673486649990082,
"learning_rate": 2.876742293578155e-05,
"loss": 0.5957,
"step": 64
},
{
"epoch": 2.1659751037344397,
"grad_norm": 0.5242981314659119,
"learning_rate": 2.834676908963636e-05,
"loss": 0.6047,
"step": 65
},
{
"epoch": 2.199170124481328,
"grad_norm": 0.4935699999332428,
"learning_rate": 2.792159532078314e-05,
"loss": 0.6015,
"step": 66
},
{
"epoch": 2.232365145228216,
"grad_norm": 0.4444994032382965,
"learning_rate": 2.7492131868318247e-05,
"loss": 0.6078,
"step": 67
},
{
"epoch": 2.2655601659751037,
"grad_norm": 0.5210160613059998,
"learning_rate": 2.7058611294277378e-05,
"loss": 0.6101,
"step": 68
},
{
"epoch": 2.2987551867219915,
"grad_norm": 0.41195791959762573,
"learning_rate": 2.6621268357699165e-05,
"loss": 0.5955,
"step": 69
},
{
"epoch": 2.33195020746888,
"grad_norm": 0.43684709072113037,
"learning_rate": 2.618033988749895e-05,
"loss": 0.5968,
"step": 70
},
{
"epoch": 2.3651452282157677,
"grad_norm": 0.4049234986305237,
"learning_rate": 2.5736064654221808e-05,
"loss": 0.5913,
"step": 71
},
{
"epoch": 2.3983402489626555,
"grad_norm": 0.3643253445625305,
"learning_rate": 2.528868324074405e-05,
"loss": 0.5973,
"step": 72
},
{
"epoch": 2.431535269709544,
"grad_norm": 0.3658091425895691,
"learning_rate": 2.4838437911993355e-05,
"loss": 0.6042,
"step": 73
},
{
"epoch": 2.4647302904564317,
"grad_norm": 0.3830958902835846,
"learning_rate": 2.4385572483758066e-05,
"loss": 0.585,
"step": 74
},
{
"epoch": 2.4979253112033195,
"grad_norm": 0.34319645166397095,
"learning_rate": 2.3930332190656604e-05,
"loss": 0.5876,
"step": 75
},
{
"epoch": 2.5311203319502074,
"grad_norm": 0.45185309648513794,
"learning_rate": 2.3472963553338614e-05,
"loss": 0.5955,
"step": 76
},
{
"epoch": 2.564315352697095,
"grad_norm": 0.32404085993766785,
"learning_rate": 2.3013714244989665e-05,
"loss": 0.5837,
"step": 77
},
{
"epoch": 2.5975103734439835,
"grad_norm": 0.3434089720249176,
"learning_rate": 2.25528329572119e-05,
"loss": 0.5919,
"step": 78
},
{
"epoch": 2.6307053941908713,
"grad_norm": 0.3299252986907959,
"learning_rate": 2.209056926535307e-05,
"loss": 0.5857,
"step": 79
},
{
"epoch": 2.663900414937759,
"grad_norm": 0.3217681348323822,
"learning_rate": 2.1627173493357167e-05,
"loss": 0.5825,
"step": 80
},
{
"epoch": 2.6970954356846475,
"grad_norm": 0.3666139245033264,
"learning_rate": 2.1162896578209517e-05,
"loss": 0.5904,
"step": 81
},
{
"epoch": 2.7302904564315353,
"grad_norm": 0.361447811126709,
"learning_rate": 2.0697989934050025e-05,
"loss": 0.6071,
"step": 82
},
{
"epoch": 2.763485477178423,
"grad_norm": 0.3171005845069885,
"learning_rate": 2.0232705316027946e-05,
"loss": 0.5848,
"step": 83
},
{
"epoch": 2.796680497925311,
"grad_norm": 0.345865398645401,
"learning_rate": 1.9767294683972064e-05,
"loss": 0.5939,
"step": 84
},
{
"epoch": 2.8298755186721993,
"grad_norm": 0.31210237741470337,
"learning_rate": 1.930201006594999e-05,
"loss": 0.6051,
"step": 85
},
{
"epoch": 2.863070539419087,
"grad_norm": 0.3219706416130066,
"learning_rate": 1.8837103421790486e-05,
"loss": 0.5933,
"step": 86
},
{
"epoch": 2.896265560165975,
"grad_norm": 0.32879552245140076,
"learning_rate": 1.837282650664284e-05,
"loss": 0.5758,
"step": 87
},
{
"epoch": 2.9294605809128633,
"grad_norm": 0.3011019229888916,
"learning_rate": 1.7909430734646936e-05,
"loss": 0.5859,
"step": 88
},
{
"epoch": 2.962655601659751,
"grad_norm": 0.3295615017414093,
"learning_rate": 1.7447167042788108e-05,
"loss": 0.5645,
"step": 89
},
{
"epoch": 2.995850622406639,
"grad_norm": 0.2640462815761566,
"learning_rate": 1.698628575501034e-05,
"loss": 0.5856,
"step": 90
},
{
"epoch": 3.033195020746888,
"grad_norm": 1.0564168691635132,
"learning_rate": 1.6527036446661396e-05,
"loss": 1.0863,
"step": 91
},
{
"epoch": 3.066390041493776,
"grad_norm": 0.4192248284816742,
"learning_rate": 1.6069667809343396e-05,
"loss": 0.5199,
"step": 92
},
{
"epoch": 3.099585062240664,
"grad_norm": 0.4637589454650879,
"learning_rate": 1.561442751624193e-05,
"loss": 0.5071,
"step": 93
},
{
"epoch": 3.132780082987552,
"grad_norm": 0.6578236222267151,
"learning_rate": 1.5161562088006649e-05,
"loss": 0.5143,
"step": 94
},
{
"epoch": 3.1659751037344397,
"grad_norm": 0.40501803159713745,
"learning_rate": 1.4711316759255963e-05,
"loss": 0.5296,
"step": 95
},
{
"epoch": 3.199170124481328,
"grad_norm": 0.4923658072948456,
"learning_rate": 1.4263935345778202e-05,
"loss": 0.5225,
"step": 96
},
{
"epoch": 3.232365145228216,
"grad_norm": 0.4242916703224182,
"learning_rate": 1.3819660112501054e-05,
"loss": 0.5174,
"step": 97
},
{
"epoch": 3.2655601659751037,
"grad_norm": 0.3603566288948059,
"learning_rate": 1.3378731642300841e-05,
"loss": 0.5172,
"step": 98
},
{
"epoch": 3.2987551867219915,
"grad_norm": 0.4046936631202698,
"learning_rate": 1.2941388705722627e-05,
"loss": 0.522,
"step": 99
},
{
"epoch": 3.33195020746888,
"grad_norm": 0.46352943778038025,
"learning_rate": 1.250786813168176e-05,
"loss": 0.5161,
"step": 100
},
{
"epoch": 3.3651452282157677,
"grad_norm": 0.3518240749835968,
"learning_rate": 1.2078404679216864e-05,
"loss": 0.5039,
"step": 101
},
{
"epoch": 3.3983402489626555,
"grad_norm": 0.3432175815105438,
"learning_rate": 1.1653230910363645e-05,
"loss": 0.5082,
"step": 102
},
{
"epoch": 3.431535269709544,
"grad_norm": 0.3457704484462738,
"learning_rate": 1.123257706421845e-05,
"loss": 0.5229,
"step": 103
},
{
"epoch": 3.4647302904564317,
"grad_norm": 0.3476507067680359,
"learning_rate": 1.0816670932259763e-05,
"loss": 0.508,
"step": 104
},
{
"epoch": 3.4979253112033195,
"grad_norm": 0.30545860528945923,
"learning_rate": 1.0405737734995083e-05,
"loss": 0.5112,
"step": 105
},
{
"epoch": 3.5311203319502074,
"grad_norm": 0.3181229829788208,
"learning_rate": 1.0000000000000006e-05,
"loss": 0.5243,
"step": 106
},
{
"epoch": 3.564315352697095,
"grad_norm": 0.3201195299625397,
"learning_rate": 9.599677441415694e-06,
"loss": 0.5098,
"step": 107
},
{
"epoch": 3.5975103734439835,
"grad_norm": 0.30049625039100647,
"learning_rate": 9.204986840969749e-06,
"loss": 0.4996,
"step": 108
},
{
"epoch": 3.6307053941908713,
"grad_norm": 0.27741697430610657,
"learning_rate": 8.816141930585067e-06,
"loss": 0.4927,
"step": 109
},
{
"epoch": 3.663900414937759,
"grad_norm": 0.28835639357566833,
"learning_rate": 8.43335327664027e-06,
"loss": 0.514,
"step": 110
}
],
"logging_steps": 1,
"max_steps": 150,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1346592210681856.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}