{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 135,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.037037037037037035,
"grad_norm": 3.889946460723877,
"learning_rate": 0.0,
"loss": 1.0255,
"step": 1
},
{
"epoch": 0.07407407407407407,
"grad_norm": 3.7213633060455322,
"learning_rate": 3.846153846153847e-06,
"loss": 0.99,
"step": 2
},
{
"epoch": 0.1111111111111111,
"grad_norm": 2.358224868774414,
"learning_rate": 7.692307692307694e-06,
"loss": 1.0113,
"step": 3
},
{
"epoch": 0.14814814814814814,
"grad_norm": 3.7503502368927,
"learning_rate": 1.153846153846154e-05,
"loss": 0.952,
"step": 4
},
{
"epoch": 0.18518518518518517,
"grad_norm": 6.118247032165527,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.0118,
"step": 5
},
{
"epoch": 0.2222222222222222,
"grad_norm": 11.75301456451416,
"learning_rate": 1.923076923076923e-05,
"loss": 1.0014,
"step": 6
},
{
"epoch": 0.25925925925925924,
"grad_norm": 2.9081552028656006,
"learning_rate": 2.307692307692308e-05,
"loss": 1.0193,
"step": 7
},
{
"epoch": 0.2962962962962963,
"grad_norm": 4.145256996154785,
"learning_rate": 2.6923076923076923e-05,
"loss": 0.9929,
"step": 8
},
{
"epoch": 0.3333333333333333,
"grad_norm": 2.380268096923828,
"learning_rate": 3.0769230769230774e-05,
"loss": 0.968,
"step": 9
},
{
"epoch": 0.37037037037037035,
"grad_norm": 2.615178108215332,
"learning_rate": 3.461538461538462e-05,
"loss": 0.9375,
"step": 10
},
{
"epoch": 0.4074074074074074,
"grad_norm": 2.8492236137390137,
"learning_rate": 3.846153846153846e-05,
"loss": 0.8548,
"step": 11
},
{
"epoch": 0.4444444444444444,
"grad_norm": 3.7682080268859863,
"learning_rate": 4.230769230769231e-05,
"loss": 0.9339,
"step": 12
},
{
"epoch": 0.48148148148148145,
"grad_norm": 2.53898286819458,
"learning_rate": 4.615384615384616e-05,
"loss": 0.8744,
"step": 13
},
{
"epoch": 0.5185185185185185,
"grad_norm": 3.0268638134002686,
"learning_rate": 5e-05,
"loss": 0.8486,
"step": 14
},
{
"epoch": 0.5555555555555556,
"grad_norm": 1.9204188585281372,
"learning_rate": 4.9992540519072045e-05,
"loss": 0.8193,
"step": 15
},
{
"epoch": 0.5925925925925926,
"grad_norm": 2.533561944961548,
"learning_rate": 4.9970167022408685e-05,
"loss": 0.8744,
"step": 16
},
{
"epoch": 0.6296296296296297,
"grad_norm": 1.509847640991211,
"learning_rate": 4.993289434509185e-05,
"loss": 0.849,
"step": 17
},
{
"epoch": 0.6666666666666666,
"grad_norm": 1.8710334300994873,
"learning_rate": 4.988074720132825e-05,
"loss": 0.7792,
"step": 18
},
{
"epoch": 0.7037037037037037,
"grad_norm": 1.9412447214126587,
"learning_rate": 4.9813760168062285e-05,
"loss": 0.8514,
"step": 19
},
{
"epoch": 0.7407407407407407,
"grad_norm": 1.3795047998428345,
"learning_rate": 4.9731977662049233e-05,
"loss": 0.8437,
"step": 20
},
{
"epoch": 0.7777777777777778,
"grad_norm": 1.3450686931610107,
"learning_rate": 4.9635453910404125e-05,
"loss": 0.7967,
"step": 21
},
{
"epoch": 0.8148148148148148,
"grad_norm": 1.3413690328598022,
"learning_rate": 4.9524252914645555e-05,
"loss": 0.8021,
"step": 22
},
{
"epoch": 0.8518518518518519,
"grad_norm": 1.650895357131958,
"learning_rate": 4.939844840825861e-05,
"loss": 0.798,
"step": 23
},
{
"epoch": 0.8888888888888888,
"grad_norm": 1.3016217947006226,
"learning_rate": 4.9258123807804715e-05,
"loss": 0.8069,
"step": 24
},
{
"epoch": 0.9259259259259259,
"grad_norm": 1.2827719449996948,
"learning_rate": 4.910337215761104e-05,
"loss": 0.7925,
"step": 25
},
{
"epoch": 0.9629629629629629,
"grad_norm": 1.6255881786346436,
"learning_rate": 4.8934296068076105e-05,
"loss": 0.823,
"step": 26
},
{
"epoch": 1.0,
"grad_norm": 1.3472081422805786,
"learning_rate": 4.875100764763238e-05,
"loss": 0.8121,
"step": 27
},
{
"epoch": 1.037037037037037,
"grad_norm": 1.3420181274414062,
"learning_rate": 4.855362842841111e-05,
"loss": 0.7562,
"step": 28
},
{
"epoch": 1.074074074074074,
"grad_norm": 1.0205856561660767,
"learning_rate": 4.834228928565864e-05,
"loss": 0.7176,
"step": 29
},
{
"epoch": 1.1111111111111112,
"grad_norm": 1.5821263790130615,
"learning_rate": 4.811713035095761e-05,
"loss": 0.7531,
"step": 30
},
{
"epoch": 1.1481481481481481,
"grad_norm": 1.0714203119277954,
"learning_rate": 4.7878300919310606e-05,
"loss": 0.7189,
"step": 31
},
{
"epoch": 1.1851851851851851,
"grad_norm": 1.2463059425354004,
"learning_rate": 4.7625959350147905e-05,
"loss": 0.7524,
"step": 32
},
{
"epoch": 1.2222222222222223,
"grad_norm": 1.087406039237976,
"learning_rate": 4.7360272962324815e-05,
"loss": 0.7422,
"step": 33
},
{
"epoch": 1.2592592592592593,
"grad_norm": 1.4159847497940063,
"learning_rate": 4.70814179231785e-05,
"loss": 0.7596,
"step": 34
},
{
"epoch": 1.2962962962962963,
"grad_norm": 1.084489345550537,
"learning_rate": 4.678957913171748e-05,
"loss": 0.7427,
"step": 35
},
{
"epoch": 1.3333333333333333,
"grad_norm": 1.2119760513305664,
"learning_rate": 4.648495009602168e-05,
"loss": 0.7429,
"step": 36
},
{
"epoch": 1.3703703703703702,
"grad_norm": 1.1701514720916748,
"learning_rate": 4.616773280493393e-05,
"loss": 0.7292,
"step": 37
},
{
"epoch": 1.4074074074074074,
"grad_norm": 1.2118418216705322,
"learning_rate": 4.5838137594128254e-05,
"loss": 0.6576,
"step": 38
},
{
"epoch": 1.4444444444444444,
"grad_norm": 1.8038779497146606,
"learning_rate": 4.549638300664369e-05,
"loss": 0.7537,
"step": 39
},
{
"epoch": 1.4814814814814814,
"grad_norm": 1.0828356742858887,
"learning_rate": 4.5142695647975993e-05,
"loss": 0.6865,
"step": 40
},
{
"epoch": 1.5185185185185186,
"grad_norm": 1.5504570007324219,
"learning_rate": 4.47773100358235e-05,
"loss": 0.6754,
"step": 41
},
{
"epoch": 1.5555555555555556,
"grad_norm": 1.0614900588989258,
"learning_rate": 4.44004684445867e-05,
"loss": 0.6384,
"step": 42
},
{
"epoch": 1.5925925925925926,
"grad_norm": 1.7621930837631226,
"learning_rate": 4.401242074472448e-05,
"loss": 0.7096,
"step": 43
},
{
"epoch": 1.6296296296296298,
"grad_norm": 1.2761889696121216,
"learning_rate": 4.361342423707385e-05,
"loss": 0.6737,
"step": 44
},
{
"epoch": 1.6666666666666665,
"grad_norm": 1.1437257528305054,
"learning_rate": 4.320374348224273e-05,
"loss": 0.6176,
"step": 45
},
{
"epoch": 1.7037037037037037,
"grad_norm": 1.2675424814224243,
"learning_rate": 4.2783650125189096e-05,
"loss": 0.6927,
"step": 46
},
{
"epoch": 1.7407407407407407,
"grad_norm": 1.4404767751693726,
"learning_rate": 4.2353422715102714e-05,
"loss": 0.674,
"step": 47
},
{
"epoch": 1.7777777777777777,
"grad_norm": 1.0773519277572632,
"learning_rate": 4.191334652070895e-05,
"loss": 0.6334,
"step": 48
},
{
"epoch": 1.8148148148148149,
"grad_norm": 1.1088582277297974,
"learning_rate": 4.146371334111702e-05,
"loss": 0.6316,
"step": 49
},
{
"epoch": 1.8518518518518519,
"grad_norm": 1.1710089445114136,
"learning_rate": 4.1004821312338285e-05,
"loss": 0.6552,
"step": 50
},
{
"epoch": 1.8888888888888888,
"grad_norm": 1.1748957633972168,
"learning_rate": 4.053697470960268e-05,
"loss": 0.647,
"step": 51
},
{
"epoch": 1.925925925925926,
"grad_norm": 0.9752252697944641,
"learning_rate": 4.006048374560445e-05,
"loss": 0.6347,
"step": 52
},
{
"epoch": 1.9629629629629628,
"grad_norm": 1.0895061492919922,
"learning_rate": 3.9575664364811015e-05,
"loss": 0.6857,
"step": 53
},
{
"epoch": 2.0,
"grad_norm": 1.7490938901901245,
"learning_rate": 3.90828380339712e-05,
"loss": 0.6664,
"step": 54
},
{
"epoch": 2.037037037037037,
"grad_norm": 1.1272878646850586,
"learning_rate": 3.858233152896195e-05,
"loss": 0.633,
"step": 55
},
{
"epoch": 2.074074074074074,
"grad_norm": 1.1926578283309937,
"learning_rate": 3.8074476718114706e-05,
"loss": 0.5644,
"step": 56
},
{
"epoch": 2.111111111111111,
"grad_norm": 1.3418350219726562,
"learning_rate": 3.7559610342165064e-05,
"loss": 0.6357,
"step": 57
},
{
"epoch": 2.148148148148148,
"grad_norm": 1.3300039768218994,
"learning_rate": 3.7038073790971875e-05,
"loss": 0.5759,
"step": 58
},
{
"epoch": 2.185185185185185,
"grad_norm": 1.1557310819625854,
"learning_rate": 3.65102128771535e-05,
"loss": 0.6032,
"step": 59
},
{
"epoch": 2.2222222222222223,
"grad_norm": 1.384039044380188,
"learning_rate": 3.597637760679167e-05,
"loss": 0.5991,
"step": 60
},
{
"epoch": 2.259259259259259,
"grad_norm": 1.12711763381958,
"learning_rate": 3.543692194735464e-05,
"loss": 0.6423,
"step": 61
},
{
"epoch": 2.2962962962962963,
"grad_norm": 1.4164304733276367,
"learning_rate": 3.4892203592993786e-05,
"loss": 0.6083,
"step": 62
},
{
"epoch": 2.3333333333333335,
"grad_norm": 1.0840896368026733,
"learning_rate": 3.434258372736915e-05,
"loss": 0.619,
"step": 63
},
{
"epoch": 2.3703703703703702,
"grad_norm": 1.3002837896347046,
"learning_rate": 3.3788426784161216e-05,
"loss": 0.6012,
"step": 64
},
{
"epoch": 2.4074074074074074,
"grad_norm": 1.4222713708877563,
"learning_rate": 3.323010020542765e-05,
"loss": 0.5493,
"step": 65
},
{
"epoch": 2.4444444444444446,
"grad_norm": 1.0047050714492798,
"learning_rate": 3.2667974197965405e-05,
"loss": 0.6409,
"step": 66
},
{
"epoch": 2.4814814814814814,
"grad_norm": 1.1294559240341187,
"learning_rate": 3.210242148783952e-05,
"loss": 0.5421,
"step": 67
},
{
"epoch": 2.5185185185185186,
"grad_norm": 1.3252534866333008,
"learning_rate": 3.1533817073241556e-05,
"loss": 0.5746,
"step": 68
},
{
"epoch": 2.5555555555555554,
"grad_norm": 1.2919150590896606,
"learning_rate": 3.096253797584139e-05,
"loss": 0.5149,
"step": 69
},
{
"epoch": 2.5925925925925926,
"grad_norm": 1.1059527397155762,
"learning_rate": 3.03889629907974e-05,
"loss": 0.6009,
"step": 70
},
{
"epoch": 2.6296296296296298,
"grad_norm": 1.0076607465744019,
"learning_rate": 2.981347243559061e-05,
"loss": 0.551,
"step": 71
},
{
"epoch": 2.6666666666666665,
"grad_norm": 1.179908037185669,
"learning_rate": 2.923644789784955e-05,
"loss": 0.4946,
"step": 72
},
{
"epoch": 2.7037037037037037,
"grad_norm": 1.197548508644104,
"learning_rate": 2.86582719823328e-05,
"loss": 0.5694,
"step": 73
},
{
"epoch": 2.7407407407407405,
"grad_norm": 1.1806666851043701,
"learning_rate": 2.807932805723725e-05,
"loss": 0.5638,
"step": 74
},
{
"epoch": 2.7777777777777777,
"grad_norm": 1.3463941812515259,
"learning_rate": 2.7500000000000004e-05,
"loss": 0.5082,
"step": 75
},
{
"epoch": 2.814814814814815,
"grad_norm": 1.1747078895568848,
"learning_rate": 2.692067194276276e-05,
"loss": 0.5078,
"step": 76
},
{
"epoch": 2.851851851851852,
"grad_norm": 1.1876875162124634,
"learning_rate": 2.6341728017667205e-05,
"loss": 0.5297,
"step": 77
},
{
"epoch": 2.888888888888889,
"grad_norm": 1.0407406091690063,
"learning_rate": 2.5763552102150456e-05,
"loss": 0.5319,
"step": 78
},
{
"epoch": 2.925925925925926,
"grad_norm": 1.3862707614898682,
"learning_rate": 2.518652756440939e-05,
"loss": 0.4949,
"step": 79
},
{
"epoch": 2.962962962962963,
"grad_norm": 1.0679614543914795,
"learning_rate": 2.4611037009202603e-05,
"loss": 0.5533,
"step": 80
},
{
"epoch": 3.0,
"grad_norm": 1.4187452793121338,
"learning_rate": 2.4037462024158607e-05,
"loss": 0.574,
"step": 81
},
{
"epoch": 3.037037037037037,
"grad_norm": 1.0800102949142456,
"learning_rate": 2.3466182926758456e-05,
"loss": 0.5141,
"step": 82
},
{
"epoch": 3.074074074074074,
"grad_norm": 1.321203351020813,
"learning_rate": 2.289757851216049e-05,
"loss": 0.4526,
"step": 83
},
{
"epoch": 3.111111111111111,
"grad_norm": 1.0968624353408813,
"learning_rate": 2.2332025802034607e-05,
"loss": 0.523,
"step": 84
},
{
"epoch": 3.148148148148148,
"grad_norm": 1.173933744430542,
"learning_rate": 2.176989979457236e-05,
"loss": 0.4723,
"step": 85
},
{
"epoch": 3.185185185185185,
"grad_norm": 1.1096935272216797,
"learning_rate": 2.1211573215838792e-05,
"loss": 0.4802,
"step": 86
},
{
"epoch": 3.2222222222222223,
"grad_norm": 1.2036195993423462,
"learning_rate": 2.0657416272630853e-05,
"loss": 0.4906,
"step": 87
},
{
"epoch": 3.259259259259259,
"grad_norm": 1.3066295385360718,
"learning_rate": 2.010779640700622e-05,
"loss": 0.5162,
"step": 88
},
{
"epoch": 3.2962962962962963,
"grad_norm": 1.0549880266189575,
"learning_rate": 1.9563078052645367e-05,
"loss": 0.5089,
"step": 89
},
{
"epoch": 3.3333333333333335,
"grad_norm": 1.0702770948410034,
"learning_rate": 1.9023622393208336e-05,
"loss": 0.4899,
"step": 90
},
{
"epoch": 3.3703703703703702,
"grad_norm": 1.1546988487243652,
"learning_rate": 1.84897871228465e-05,
"loss": 0.4927,
"step": 91
},
{
"epoch": 3.4074074074074074,
"grad_norm": 1.0201380252838135,
"learning_rate": 1.796192620902814e-05,
"loss": 0.4419,
"step": 92
},
{
"epoch": 3.4444444444444446,
"grad_norm": 1.1780834197998047,
"learning_rate": 1.744038965783493e-05,
"loss": 0.505,
"step": 93
},
{
"epoch": 3.4814814814814814,
"grad_norm": 1.4081705808639526,
"learning_rate": 1.692552328188531e-05,
"loss": 0.4298,
"step": 94
},
{
"epoch": 3.5185185185185186,
"grad_norm": 1.1309189796447754,
"learning_rate": 1.6417668471038057e-05,
"loss": 0.4648,
"step": 95
},
{
"epoch": 3.5555555555555554,
"grad_norm": 1.2165582180023193,
"learning_rate": 1.5917161966028815e-05,
"loss": 0.4143,
"step": 96
},
{
"epoch": 3.5925925925925926,
"grad_norm": 1.1092609167099,
"learning_rate": 1.542433563518899e-05,
"loss": 0.4837,
"step": 97
},
{
"epoch": 3.6296296296296298,
"grad_norm": 1.0920662879943848,
"learning_rate": 1.4939516254395546e-05,
"loss": 0.4345,
"step": 98
},
{
"epoch": 3.6666666666666665,
"grad_norm": 1.0206769704818726,
"learning_rate": 1.446302529039732e-05,
"loss": 0.3851,
"step": 99
},
{
"epoch": 3.7037037037037037,
"grad_norm": 1.0061428546905518,
"learning_rate": 1.399517868766172e-05,
"loss": 0.4538,
"step": 100
},
{
"epoch": 3.7407407407407405,
"grad_norm": 1.2543085813522339,
"learning_rate": 1.3536286658882989e-05,
"loss": 0.4501,
"step": 101
},
{
"epoch": 3.7777777777777777,
"grad_norm": 1.2222250699996948,
"learning_rate": 1.3086653479291062e-05,
"loss": 0.4127,
"step": 102
},
{
"epoch": 3.814814814814815,
"grad_norm": 1.0591493844985962,
"learning_rate": 1.2646577284897284e-05,
"loss": 0.4031,
"step": 103
},
{
"epoch": 3.851851851851852,
"grad_norm": 1.2473130226135254,
"learning_rate": 1.2216349874810906e-05,
"loss": 0.4178,
"step": 104
},
{
"epoch": 3.888888888888889,
"grad_norm": 1.3380539417266846,
"learning_rate": 1.1796256517757267e-05,
"loss": 0.4176,
"step": 105
},
{
"epoch": 3.925925925925926,
"grad_norm": 1.4147883653640747,
"learning_rate": 1.1386575762926155e-05,
"loss": 0.4069,
"step": 106
},
{
"epoch": 3.962962962962963,
"grad_norm": 1.1942764520645142,
"learning_rate": 1.0987579255275524e-05,
"loss": 0.4365,
"step": 107
},
{
"epoch": 4.0,
"grad_norm": 1.0244306325912476,
"learning_rate": 1.0599531555413309e-05,
"loss": 0.4665,
"step": 108
},
{
"epoch": 4.037037037037037,
"grad_norm": 1.2454806566238403,
"learning_rate": 1.0222689964176502e-05,
"loss": 0.4011,
"step": 109
},
{
"epoch": 4.074074074074074,
"grad_norm": 0.9134472608566284,
"learning_rate": 9.857304352024019e-06,
"loss": 0.3615,
"step": 110
},
{
"epoch": 4.111111111111111,
"grad_norm": 0.9509828090667725,
"learning_rate": 9.503616993356315e-06,
"loss": 0.4154,
"step": 111
},
{
"epoch": 4.148148148148148,
"grad_norm": 0.8373136520385742,
"learning_rate": 9.161862405871748e-06,
"loss": 0.3683,
"step": 112
},
{
"epoch": 4.185185185185185,
"grad_norm": 0.9871260523796082,
"learning_rate": 8.832267195066075e-06,
"loss": 0.3695,
"step": 113
},
{
"epoch": 4.222222222222222,
"grad_norm": 1.120859980583191,
"learning_rate": 8.515049903978325e-06,
"loss": 0.3897,
"step": 114
},
{
"epoch": 4.2592592592592595,
"grad_norm": 1.2524614334106445,
"learning_rate": 8.210420868282522e-06,
"loss": 0.4148,
"step": 115
},
{
"epoch": 4.296296296296296,
"grad_norm": 1.1435761451721191,
"learning_rate": 7.918582076821507e-06,
"loss": 0.4006,
"step": 116
},
{
"epoch": 4.333333333333333,
"grad_norm": 1.2172073125839233,
"learning_rate": 7.639727037675181e-06,
"loss": 0.3915,
"step": 117
},
{
"epoch": 4.37037037037037,
"grad_norm": 1.1755033731460571,
"learning_rate": 7.374040649852105e-06,
"loss": 0.4021,
"step": 118
},
{
"epoch": 4.407407407407407,
"grad_norm": 1.001423716545105,
"learning_rate": 7.121699080689394e-06,
"loss": 0.3457,
"step": 119
},
{
"epoch": 4.444444444444445,
"grad_norm": 1.1704602241516113,
"learning_rate": 6.882869649042397e-06,
"loss": 0.4105,
"step": 120
},
{
"epoch": 4.481481481481482,
"grad_norm": 1.072298288345337,
"learning_rate": 6.657710714341364e-06,
"loss": 0.3461,
"step": 121
},
{
"epoch": 4.518518518518518,
"grad_norm": 0.9239315986633301,
"learning_rate": 6.446371571588896e-06,
"loss": 0.3633,
"step": 122
},
{
"epoch": 4.555555555555555,
"grad_norm": 0.8906875252723694,
"learning_rate": 6.248992352367622e-06,
"loss": 0.3273,
"step": 123
},
{
"epoch": 4.592592592592593,
"grad_norm": 0.9496861696243286,
"learning_rate": 6.065703931923894e-06,
"loss": 0.391,
"step": 124
},
{
"epoch": 4.62962962962963,
"grad_norm": 0.9626538753509521,
"learning_rate": 5.896627842388961e-06,
"loss": 0.3506,
"step": 125
},
{
"epoch": 4.666666666666667,
"grad_norm": 0.8809230327606201,
"learning_rate": 5.741876192195292e-06,
"loss": 0.2992,
"step": 126
},
{
"epoch": 4.703703703703704,
"grad_norm": 1.044419288635254,
"learning_rate": 5.601551591741394e-06,
"loss": 0.3594,
"step": 127
},
{
"epoch": 4.7407407407407405,
"grad_norm": 1.0764929056167603,
"learning_rate": 5.47574708535445e-06,
"loss": 0.3684,
"step": 128
},
{
"epoch": 4.777777777777778,
"grad_norm": 1.0782520771026611,
"learning_rate": 5.364546089595883e-06,
"loss": 0.3323,
"step": 129
},
{
"epoch": 4.814814814814815,
"grad_norm": 0.9487962126731873,
"learning_rate": 5.268022337950767e-06,
"loss": 0.3182,
"step": 130
},
{
"epoch": 4.851851851851852,
"grad_norm": 1.0227851867675781,
"learning_rate": 5.186239831937717e-06,
"loss": 0.3416,
"step": 131
},
{
"epoch": 4.888888888888889,
"grad_norm": 1.0411008596420288,
"learning_rate": 5.119252798671747e-06,
"loss": 0.3446,
"step": 132
},
{
"epoch": 4.925925925925926,
"grad_norm": 0.8823951482772827,
"learning_rate": 5.0671056549081495e-06,
"loss": 0.3355,
"step": 133
},
{
"epoch": 4.962962962962963,
"grad_norm": 0.8825088739395142,
"learning_rate": 5.029832977591314e-06,
"loss": 0.3553,
"step": 134
},
{
"epoch": 5.0,
"grad_norm": 1.035265326499939,
"learning_rate": 5.007459480927957e-06,
"loss": 0.3773,
"step": 135
}
],
"logging_steps": 1,
"max_steps": 135,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 27,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.2332169934340096e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}