{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1320,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022727272727272728,
"grad_norm": 6.35440244262687,
"learning_rate": 7.575757575757576e-07,
"loss": 0.7684,
"step": 10
},
{
"epoch": 0.045454545454545456,
"grad_norm": 2.539886425067851,
"learning_rate": 1.5151515151515152e-06,
"loss": 0.6837,
"step": 20
},
{
"epoch": 0.06818181818181818,
"grad_norm": 2.459810303110342,
"learning_rate": 2.2727272727272728e-06,
"loss": 0.6466,
"step": 30
},
{
"epoch": 0.09090909090909091,
"grad_norm": 1.770796632740967,
"learning_rate": 3.0303030303030305e-06,
"loss": 0.5912,
"step": 40
},
{
"epoch": 0.11363636363636363,
"grad_norm": 1.2695649352744178,
"learning_rate": 3.7878787878787882e-06,
"loss": 0.5324,
"step": 50
},
{
"epoch": 0.13636363636363635,
"grad_norm": 1.1616146630386306,
"learning_rate": 4.5454545454545455e-06,
"loss": 0.519,
"step": 60
},
{
"epoch": 0.1590909090909091,
"grad_norm": 1.2275679597942535,
"learning_rate": 5.303030303030303e-06,
"loss": 0.5089,
"step": 70
},
{
"epoch": 0.18181818181818182,
"grad_norm": 1.2356666530916727,
"learning_rate": 6.060606060606061e-06,
"loss": 0.4981,
"step": 80
},
{
"epoch": 0.20454545454545456,
"grad_norm": 1.0321368603514207,
"learning_rate": 6.818181818181818e-06,
"loss": 0.5116,
"step": 90
},
{
"epoch": 0.22727272727272727,
"grad_norm": 1.2554331662611178,
"learning_rate": 7.5757575757575764e-06,
"loss": 0.4969,
"step": 100
},
{
"epoch": 0.25,
"grad_norm": 1.108592526031677,
"learning_rate": 8.333333333333334e-06,
"loss": 0.4667,
"step": 110
},
{
"epoch": 0.2727272727272727,
"grad_norm": 1.2536541822586402,
"learning_rate": 9.090909090909091e-06,
"loss": 0.4538,
"step": 120
},
{
"epoch": 0.29545454545454547,
"grad_norm": 1.085405743643047,
"learning_rate": 9.84848484848485e-06,
"loss": 0.4547,
"step": 130
},
{
"epoch": 0.3181818181818182,
"grad_norm": 1.3093680137962422,
"learning_rate": 9.99888115313551e-06,
"loss": 0.4857,
"step": 140
},
{
"epoch": 0.3409090909090909,
"grad_norm": 1.3773445549991983,
"learning_rate": 9.994336695915041e-06,
"loss": 0.4805,
"step": 150
},
{
"epoch": 0.36363636363636365,
"grad_norm": 1.591560589841802,
"learning_rate": 9.986299875742612e-06,
"loss": 0.4629,
"step": 160
},
{
"epoch": 0.38636363636363635,
"grad_norm": 1.4004153271538835,
"learning_rate": 9.97477631248223e-06,
"loss": 0.4791,
"step": 170
},
{
"epoch": 0.4090909090909091,
"grad_norm": 1.0354548471138036,
"learning_rate": 9.959774064153977e-06,
"loss": 0.452,
"step": 180
},
{
"epoch": 0.4318181818181818,
"grad_norm": 1.2208714972404657,
"learning_rate": 9.941303621299332e-06,
"loss": 0.4505,
"step": 190
},
{
"epoch": 0.45454545454545453,
"grad_norm": 1.1847279130783284,
"learning_rate": 9.919377899645497e-06,
"loss": 0.4539,
"step": 200
},
{
"epoch": 0.4772727272727273,
"grad_norm": 1.1879951591803597,
"learning_rate": 9.894012231073895e-06,
"loss": 0.4473,
"step": 210
},
{
"epoch": 0.5,
"grad_norm": 1.705038237865607,
"learning_rate": 9.86522435289912e-06,
"loss": 0.4794,
"step": 220
},
{
"epoch": 0.5227272727272727,
"grad_norm": 1.3352984955766194,
"learning_rate": 9.833034395465866e-06,
"loss": 0.4437,
"step": 230
},
{
"epoch": 0.5454545454545454,
"grad_norm": 1.3789144895455978,
"learning_rate": 9.797464868072489e-06,
"loss": 0.4667,
"step": 240
},
{
"epoch": 0.5681818181818182,
"grad_norm": 1.0952159859041897,
"learning_rate": 9.758540643231041e-06,
"loss": 0.4545,
"step": 250
},
{
"epoch": 0.5909090909090909,
"grad_norm": 1.071210265571984,
"learning_rate": 9.716288939274818e-06,
"loss": 0.4632,
"step": 260
},
{
"epoch": 0.6136363636363636,
"grad_norm": 1.2854473189577795,
"learning_rate": 9.670739301325534e-06,
"loss": 0.461,
"step": 270
},
{
"epoch": 0.6363636363636364,
"grad_norm": 1.0349444641905114,
"learning_rate": 9.621923580633462e-06,
"loss": 0.4503,
"step": 280
},
{
"epoch": 0.6590909090909091,
"grad_norm": 1.0305581230268586,
"learning_rate": 9.56987591230498e-06,
"loss": 0.448,
"step": 290
},
{
"epoch": 0.6818181818181818,
"grad_norm": 1.0379340882310921,
"learning_rate": 9.514632691433108e-06,
"loss": 0.4233,
"step": 300
},
{
"epoch": 0.7045454545454546,
"grad_norm": 1.0211063705976775,
"learning_rate": 9.456232547647695e-06,
"loss": 0.4611,
"step": 310
},
{
"epoch": 0.7272727272727273,
"grad_norm": 1.270861551615867,
"learning_rate": 9.394716318103098e-06,
"loss": 0.4666,
"step": 320
},
{
"epoch": 0.75,
"grad_norm": 1.4358106750611455,
"learning_rate": 9.330127018922195e-06,
"loss": 0.4311,
"step": 330
},
{
"epoch": 0.7727272727272727,
"grad_norm": 0.9211243850184648,
"learning_rate": 9.262509815116732e-06,
"loss": 0.4625,
"step": 340
},
{
"epoch": 0.7954545454545454,
"grad_norm": 1.0471205669074248,
"learning_rate": 9.191911989005038e-06,
"loss": 0.4385,
"step": 350
},
{
"epoch": 0.8181818181818182,
"grad_norm": 1.1367135885228616,
"learning_rate": 9.118382907149164e-06,
"loss": 0.4504,
"step": 360
},
{
"epoch": 0.8409090909090909,
"grad_norm": 0.8937203983205025,
"learning_rate": 9.041973985834595e-06,
"loss": 0.4201,
"step": 370
},
{
"epoch": 0.8636363636363636,
"grad_norm": 1.1449338266575286,
"learning_rate": 8.96273865511666e-06,
"loss": 0.4332,
"step": 380
},
{
"epoch": 0.8863636363636364,
"grad_norm": 1.0137721464541551,
"learning_rate": 8.880732321458785e-06,
"loss": 0.4688,
"step": 390
},
{
"epoch": 0.9090909090909091,
"grad_norm": 1.141330043928384,
"learning_rate": 8.796012328988716e-06,
"loss": 0.4486,
"step": 400
},
{
"epoch": 0.9318181818181818,
"grad_norm": 0.9565319534405999,
"learning_rate": 8.708637919399798e-06,
"loss": 0.4769,
"step": 410
},
{
"epoch": 0.9545454545454546,
"grad_norm": 0.8984018100341509,
"learning_rate": 8.61867019052535e-06,
"loss": 0.432,
"step": 420
},
{
"epoch": 0.9772727272727273,
"grad_norm": 0.9304841359661392,
"learning_rate": 8.526172053615122e-06,
"loss": 0.4345,
"step": 430
},
{
"epoch": 1.0,
"grad_norm": 0.8768633210220393,
"learning_rate": 8.43120818934367e-06,
"loss": 0.4371,
"step": 440
},
{
"epoch": 1.0227272727272727,
"grad_norm": 1.1311095568028267,
"learning_rate": 8.33384500258146e-06,
"loss": 0.3547,
"step": 450
},
{
"epoch": 1.0454545454545454,
"grad_norm": 0.8562280421736388,
"learning_rate": 8.234150575960288e-06,
"loss": 0.344,
"step": 460
},
{
"epoch": 1.0681818181818181,
"grad_norm": 0.9841335785316842,
"learning_rate": 8.132194622265508e-06,
"loss": 0.3462,
"step": 470
},
{
"epoch": 1.0909090909090908,
"grad_norm": 1.045774931721029,
"learning_rate": 8.028048435688333e-06,
"loss": 0.3285,
"step": 480
},
{
"epoch": 1.1136363636363635,
"grad_norm": 0.8604729577213927,
"learning_rate": 7.921784841972355e-06,
"loss": 0.344,
"step": 490
},
{
"epoch": 1.1363636363636362,
"grad_norm": 0.9278271486506232,
"learning_rate": 7.813478147489052e-06,
"loss": 0.3357,
"step": 500
},
{
"epoch": 1.1590909090909092,
"grad_norm": 1.0237784505259664,
"learning_rate": 7.703204087277989e-06,
"loss": 0.3536,
"step": 510
},
{
"epoch": 1.1818181818181819,
"grad_norm": 0.8356075391269556,
"learning_rate": 7.5910397720879785e-06,
"loss": 0.3366,
"step": 520
},
{
"epoch": 1.2045454545454546,
"grad_norm": 0.9979040732435491,
"learning_rate": 7.477063634456263e-06,
"loss": 0.31,
"step": 530
},
{
"epoch": 1.2272727272727273,
"grad_norm": 1.0044525302301133,
"learning_rate": 7.361355373863415e-06,
"loss": 0.3211,
"step": 540
},
{
"epoch": 1.25,
"grad_norm": 2.2937575511435897,
"learning_rate": 7.243995901002312e-06,
"loss": 0.3532,
"step": 550
},
{
"epoch": 1.2727272727272727,
"grad_norm": 1.02382051382484,
"learning_rate": 7.1250672812001505e-06,
"loss": 0.3113,
"step": 560
},
{
"epoch": 1.2954545454545454,
"grad_norm": 0.8952595339853402,
"learning_rate": 7.004652677033069e-06,
"loss": 0.3264,
"step": 570
},
{
"epoch": 1.3181818181818181,
"grad_norm": 0.8831005766886504,
"learning_rate": 6.882836290173493e-06,
"loss": 0.3233,
"step": 580
},
{
"epoch": 1.3409090909090908,
"grad_norm": 0.7901321424383204,
"learning_rate": 6.759703302510898e-06,
"loss": 0.3352,
"step": 590
},
{
"epoch": 1.3636363636363638,
"grad_norm": 1.2262968003160024,
"learning_rate": 6.635339816587109e-06,
"loss": 0.3177,
"step": 600
},
{
"epoch": 1.3863636363636362,
"grad_norm": 0.7917420857688439,
"learning_rate": 6.5098327953878585e-06,
"loss": 0.3468,
"step": 610
},
{
"epoch": 1.4090909090909092,
"grad_norm": 1.0420360755184277,
"learning_rate": 6.383270001532636e-06,
"loss": 0.3253,
"step": 620
},
{
"epoch": 1.4318181818181819,
"grad_norm": 1.0205657171970297,
"learning_rate": 6.255739935905396e-06,
"loss": 0.3261,
"step": 630
},
{
"epoch": 1.4545454545454546,
"grad_norm": 1.251378392945878,
"learning_rate": 6.127331775769023e-06,
"loss": 0.314,
"step": 640
},
{
"epoch": 1.4772727272727273,
"grad_norm": 0.9207759342389322,
"learning_rate": 5.998135312406821e-06,
"loss": 0.3113,
"step": 650
},
{
"epoch": 1.5,
"grad_norm": 0.931014052691459,
"learning_rate": 5.8682408883346535e-06,
"loss": 0.3261,
"step": 660
},
{
"epoch": 1.5227272727272727,
"grad_norm": 1.2292042441399877,
"learning_rate": 5.737739334127611e-06,
"loss": 0.3195,
"step": 670
},
{
"epoch": 1.5454545454545454,
"grad_norm": 0.908649250490574,
"learning_rate": 5.60672190490541e-06,
"loss": 0.3242,
"step": 680
},
{
"epoch": 1.5681818181818183,
"grad_norm": 1.0094095236232232,
"learning_rate": 5.475280216520913e-06,
"loss": 0.3272,
"step": 690
},
{
"epoch": 1.5909090909090908,
"grad_norm": 1.1176464384766536,
"learning_rate": 5.343506181496405e-06,
"loss": 0.3221,
"step": 700
},
{
"epoch": 1.6136363636363638,
"grad_norm": 0.8887736448070302,
"learning_rate": 5.2114919447524155e-06,
"loss": 0.3333,
"step": 710
},
{
"epoch": 1.6363636363636362,
"grad_norm": 1.1062063033166711,
"learning_rate": 5.07932981917404e-06,
"loss": 0.3451,
"step": 720
},
{
"epoch": 1.6590909090909092,
"grad_norm": 0.9814802109483011,
"learning_rate": 4.947112221059803e-06,
"loss": 0.3215,
"step": 730
},
{
"epoch": 1.6818181818181817,
"grad_norm": 0.9942287700866195,
"learning_rate": 4.81493160549821e-06,
"loss": 0.32,
"step": 740
},
{
"epoch": 1.7045454545454546,
"grad_norm": 1.0881777582662082,
"learning_rate": 4.682880401717178e-06,
"loss": 0.3295,
"step": 750
},
{
"epoch": 1.7272727272727273,
"grad_norm": 0.8262863481185285,
"learning_rate": 4.551050948451542e-06,
"loss": 0.3216,
"step": 760
},
{
"epoch": 1.75,
"grad_norm": 0.8739486063657917,
"learning_rate": 4.4195354293738484e-06,
"loss": 0.3047,
"step": 770
},
{
"epoch": 1.7727272727272727,
"grad_norm": 0.7937933897637206,
"learning_rate": 4.2884258086335755e-06,
"loss": 0.3441,
"step": 780
},
{
"epoch": 1.7954545454545454,
"grad_norm": 1.0063979195807515,
"learning_rate": 4.1578137665498485e-06,
"loss": 0.322,
"step": 790
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.9811868516252389,
"learning_rate": 4.027790635502646e-06,
"loss": 0.3217,
"step": 800
},
{
"epoch": 1.8409090909090908,
"grad_norm": 1.0864122648296841,
"learning_rate": 3.898447336067297e-06,
"loss": 0.331,
"step": 810
},
{
"epoch": 1.8636363636363638,
"grad_norm": 0.99556848477455,
"learning_rate": 3.769874313436933e-06,
"loss": 0.3632,
"step": 820
},
{
"epoch": 1.8863636363636362,
"grad_norm": 0.891576989962014,
"learning_rate": 3.6421614741773702e-06,
"loss": 0.3134,
"step": 830
},
{
"epoch": 1.9090909090909092,
"grad_norm": 0.7986035082838789,
"learning_rate": 3.5153981233586277e-06,
"loss": 0.3105,
"step": 840
},
{
"epoch": 1.9318181818181817,
"grad_norm": 0.95580966680964,
"learning_rate": 3.389672902107044e-06,
"loss": 0.3276,
"step": 850
},
{
"epoch": 1.9545454545454546,
"grad_norm": 0.961790704782099,
"learning_rate": 3.2650737256216885e-06,
"loss": 0.3139,
"step": 860
},
{
"epoch": 1.9772727272727273,
"grad_norm": 0.8730438224301623,
"learning_rate": 3.141687721698363e-06,
"loss": 0.3083,
"step": 870
},
{
"epoch": 2.0,
"grad_norm": 1.0499453266134642,
"learning_rate": 3.019601169804216e-06,
"loss": 0.3335,
"step": 880
},
{
"epoch": 2.022727272727273,
"grad_norm": 0.9956657648694927,
"learning_rate": 2.898899440745569e-06,
"loss": 0.2373,
"step": 890
},
{
"epoch": 2.0454545454545454,
"grad_norm": 0.8364788663760782,
"learning_rate": 2.7796669369711294e-06,
"loss": 0.205,
"step": 900
},
{
"epoch": 2.0681818181818183,
"grad_norm": 0.7779785577534016,
"learning_rate": 2.6619870335523434e-06,
"loss": 0.2443,
"step": 910
},
{
"epoch": 2.090909090909091,
"grad_norm": 0.7498597657637932,
"learning_rate": 2.5459420198821604e-06,
"loss": 0.2284,
"step": 920
},
{
"epoch": 2.1136363636363638,
"grad_norm": 0.7866782800010009,
"learning_rate": 2.4316130421329696e-06,
"loss": 0.2161,
"step": 930
},
{
"epoch": 2.1363636363636362,
"grad_norm": 0.8286340462329047,
"learning_rate": 2.319080046513954e-06,
"loss": 0.2294,
"step": 940
},
{
"epoch": 2.159090909090909,
"grad_norm": 0.7317679036213534,
"learning_rate": 2.2084217233675386e-06,
"loss": 0.2321,
"step": 950
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.82937017655548,
"learning_rate": 2.09971545214401e-06,
"loss": 0.2141,
"step": 960
},
{
"epoch": 2.2045454545454546,
"grad_norm": 1.0909932006216758,
"learning_rate": 1.9930372472928095e-06,
"loss": 0.2249,
"step": 970
},
{
"epoch": 2.227272727272727,
"grad_norm": 0.8406918311430651,
"learning_rate": 1.8884617051083183e-06,
"loss": 0.2377,
"step": 980
},
{
"epoch": 2.25,
"grad_norm": 0.7816214910751722,
"learning_rate": 1.7860619515673034e-06,
"loss": 0.2034,
"step": 990
},
{
"epoch": 2.2727272727272725,
"grad_norm": 1.1472658731628425,
"learning_rate": 1.68590959119452e-06,
"loss": 0.2349,
"step": 1000
},
{
"epoch": 2.2954545454545454,
"grad_norm": 0.8364108799897995,
"learning_rate": 1.5880746569921867e-06,
"loss": 0.2165,
"step": 1010
},
{
"epoch": 2.3181818181818183,
"grad_norm": 0.8224990407144809,
"learning_rate": 1.4926255614683931e-06,
"loss": 0.218,
"step": 1020
},
{
"epoch": 2.340909090909091,
"grad_norm": 0.8119956704092801,
"learning_rate": 1.3996290487986568e-06,
"loss": 0.2157,
"step": 1030
},
{
"epoch": 2.3636363636363638,
"grad_norm": 0.7868638178604844,
"learning_rate": 1.3091501481540676e-06,
"loss": 0.2223,
"step": 1040
},
{
"epoch": 2.3863636363636362,
"grad_norm": 1.039084974687428,
"learning_rate": 1.2212521282287093e-06,
"loss": 0.2052,
"step": 1050
},
{
"epoch": 2.409090909090909,
"grad_norm": 1.0217506234090892,
"learning_rate": 1.135996452998085e-06,
"loss": 0.2216,
"step": 1060
},
{
"epoch": 2.4318181818181817,
"grad_norm": 0.849092945674219,
"learning_rate": 1.0534427387395391e-06,
"loss": 0.2199,
"step": 1070
},
{
"epoch": 2.4545454545454546,
"grad_norm": 0.8148942381510792,
"learning_rate": 9.73648712344707e-07,
"loss": 0.2125,
"step": 1080
},
{
"epoch": 2.4772727272727275,
"grad_norm": 0.8050466872394376,
"learning_rate": 8.966701709531344e-07,
"loss": 0.1929,
"step": 1090
},
{
"epoch": 2.5,
"grad_norm": 0.8780283448986274,
"learning_rate": 8.225609429353187e-07,
"loss": 0.2425,
"step": 1100
},
{
"epoch": 2.5227272727272725,
"grad_norm": 0.7816219793057795,
"learning_rate": 7.513728502524286e-07,
"loss": 0.2292,
"step": 1110
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.9595240653902316,
"learning_rate": 6.831556722190453e-07,
"loss": 0.2256,
"step": 1120
},
{
"epoch": 2.5681818181818183,
"grad_norm": 0.792587276105673,
"learning_rate": 6.179571106942466e-07,
"loss": 0.2154,
"step": 1130
},
{
"epoch": 2.590909090909091,
"grad_norm": 0.8358451547436763,
"learning_rate": 5.558227567253832e-07,
"loss": 0.2237,
"step": 1140
},
{
"epoch": 2.6136363636363638,
"grad_norm": 0.8903944755859766,
"learning_rate": 4.967960586678722e-07,
"loss": 0.2166,
"step": 1150
},
{
"epoch": 2.6363636363636362,
"grad_norm": 0.8509792469149934,
"learning_rate": 4.4091829180330503e-07,
"loss": 0.2125,
"step": 1160
},
{
"epoch": 2.659090909090909,
"grad_norm": 0.886408398471228,
"learning_rate": 3.882285294770938e-07,
"loss": 0.2209,
"step": 1170
},
{
"epoch": 2.6818181818181817,
"grad_norm": 0.8109381117518478,
"learning_rate": 3.3876361577587115e-07,
"loss": 0.2407,
"step": 1180
},
{
"epoch": 2.7045454545454546,
"grad_norm": 0.8002247026554093,
"learning_rate": 2.9255813976372227e-07,
"loss": 0.2343,
"step": 1190
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.83770770704907,
"learning_rate": 2.4964441129527337e-07,
"loss": 0.2453,
"step": 1200
},
{
"epoch": 2.75,
"grad_norm": 0.816464711381253,
"learning_rate": 2.1005243842255552e-07,
"loss": 0.2083,
"step": 1210
},
{
"epoch": 2.7727272727272725,
"grad_norm": 0.95604900414777,
"learning_rate": 1.738099064114368e-07,
"loss": 0.2088,
"step": 1220
},
{
"epoch": 2.7954545454545454,
"grad_norm": 0.8185862381359805,
"learning_rate": 1.4094215838229176e-07,
"loss": 0.2446,
"step": 1230
},
{
"epoch": 2.8181818181818183,
"grad_norm": 0.7499960362919149,
"learning_rate": 1.1147217758845752e-07,
"loss": 0.223,
"step": 1240
},
{
"epoch": 2.840909090909091,
"grad_norm": 0.9113822005624685,
"learning_rate": 8.542057134485638e-08,
"loss": 0.2366,
"step": 1250
},
{
"epoch": 2.8636363636363638,
"grad_norm": 0.8418617848382326,
"learning_rate": 6.280555661802857e-08,
"loss": 0.1889,
"step": 1260
},
{
"epoch": 2.8863636363636362,
"grad_norm": 0.7673661290204885,
"learning_rate": 4.3642947287654284e-08,
"loss": 0.2269,
"step": 1270
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.8092713369674382,
"learning_rate": 2.7946143088466437e-08,
"loss": 0.2184,
"step": 1280
},
{
"epoch": 2.9318181818181817,
"grad_norm": 1.0854187395464028,
"learning_rate": 1.5726120240288632e-08,
"loss": 0.2115,
"step": 1290
},
{
"epoch": 2.9545454545454546,
"grad_norm": 0.8536518727086961,
"learning_rate": 6.991423772753636e-09,
"loss": 0.2235,
"step": 1300
},
{
"epoch": 2.9772727272727275,
"grad_norm": 0.8448528485181613,
"learning_rate": 1.7481615500691829e-09,
"loss": 0.2189,
"step": 1310
},
{
"epoch": 3.0,
"grad_norm": 0.7295128069834027,
"learning_rate": 0.0,
"loss": 0.2345,
"step": 1320
},
{
"epoch": 3.0,
"step": 1320,
"total_flos": 150195282509824.0,
"train_loss": 0.3441459048878063,
"train_runtime": 5442.3628,
"train_samples_per_second": 1.937,
"train_steps_per_second": 0.243
}
],
"logging_steps": 10,
"max_steps": 1320,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 150195282509824.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}