{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 51504,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.029123951537744643,
      "grad_norm": 7.490014553070068,
      "learning_rate": 4.951557160608885e-05,
      "loss": 0.7812,
      "step": 500
    },
    {
      "epoch": 0.058247903075489285,
      "grad_norm": 7.123765468597412,
      "learning_rate": 4.9030172413793105e-05,
      "loss": 0.5964,
      "step": 1000
    },
    {
      "epoch": 0.08737185461323392,
      "grad_norm": 6.443025588989258,
      "learning_rate": 4.854477322149736e-05,
      "loss": 0.5417,
      "step": 1500
    },
    {
      "epoch": 0.11649580615097857,
      "grad_norm": 4.733907222747803,
      "learning_rate": 4.805937402920162e-05,
      "loss": 0.5339,
      "step": 2000
    },
    {
      "epoch": 0.1456197576887232,
      "grad_norm": 9.525611877441406,
      "learning_rate": 4.757397483690587e-05,
      "loss": 0.5109,
      "step": 2500
    },
    {
      "epoch": 0.17474370922646784,
      "grad_norm": 5.1521196365356445,
      "learning_rate": 4.7088575644610125e-05,
      "loss": 0.4935,
      "step": 3000
    },
    {
      "epoch": 0.20386766076421248,
      "grad_norm": 9.645527839660645,
      "learning_rate": 4.660317645231438e-05,
      "loss": 0.4834,
      "step": 3500
    },
    {
      "epoch": 0.23299161230195714,
      "grad_norm": 6.0481414794921875,
      "learning_rate": 4.611777726001864e-05,
      "loss": 0.4725,
      "step": 4000
    },
    {
      "epoch": 0.26211556383970175,
      "grad_norm": 9.349271774291992,
      "learning_rate": 4.56323780677229e-05,
      "loss": 0.4556,
      "step": 4500
    },
    {
      "epoch": 0.2912395153774464,
      "grad_norm": 5.191974639892578,
      "learning_rate": 4.514697887542715e-05,
      "loss": 0.4638,
      "step": 5000
    },
    {
      "epoch": 0.3203634669151911,
      "grad_norm": 5.982846736907959,
      "learning_rate": 4.466157968313141e-05,
      "loss": 0.454,
      "step": 5500
    },
    {
      "epoch": 0.3494874184529357,
      "grad_norm": 5.010865688323975,
      "learning_rate": 4.417618049083567e-05,
      "loss": 0.4461,
      "step": 6000
    },
    {
      "epoch": 0.37861136999068035,
      "grad_norm": 5.071208477020264,
      "learning_rate": 4.369078129853993e-05,
      "loss": 0.4338,
      "step": 6500
    },
    {
      "epoch": 0.40773532152842495,
      "grad_norm": 6.769781589508057,
      "learning_rate": 4.320538210624418e-05,
      "loss": 0.4376,
      "step": 7000
    },
    {
      "epoch": 0.4368592730661696,
      "grad_norm": 3.6443066596984863,
      "learning_rate": 4.271998291394843e-05,
      "loss": 0.4357,
      "step": 7500
    },
    {
      "epoch": 0.4659832246039143,
      "grad_norm": 7.876840591430664,
      "learning_rate": 4.223458372165269e-05,
      "loss": 0.4321,
      "step": 8000
    },
    {
      "epoch": 0.4951071761416589,
      "grad_norm": 5.94719934463501,
      "learning_rate": 4.1749184529356947e-05,
      "loss": 0.4181,
      "step": 8500
    },
    {
      "epoch": 0.5242311276794035,
      "grad_norm": 5.828630447387695,
      "learning_rate": 4.12637853370612e-05,
      "loss": 0.4202,
      "step": 9000
    },
    {
      "epoch": 0.5533550792171482,
      "grad_norm": 4.156304359436035,
      "learning_rate": 4.0778386144765457e-05,
      "loss": 0.4263,
      "step": 9500
    },
    {
      "epoch": 0.5824790307548928,
      "grad_norm": 4.188558101654053,
      "learning_rate": 4.0292986952469715e-05,
      "loss": 0.4262,
      "step": 10000
    },
    {
      "epoch": 0.6116029822926374,
      "grad_norm": 4.157588005065918,
      "learning_rate": 3.980758776017397e-05,
      "loss": 0.4174,
      "step": 10500
    },
    {
      "epoch": 0.6407269338303822,
      "grad_norm": 4.249637126922607,
      "learning_rate": 3.9322188567878225e-05,
      "loss": 0.414,
      "step": 11000
    },
    {
      "epoch": 0.6698508853681268,
      "grad_norm": 3.856900930404663,
      "learning_rate": 3.883678937558248e-05,
      "loss": 0.4129,
      "step": 11500
    },
    {
      "epoch": 0.6989748369058714,
      "grad_norm": 4.5732951164245605,
      "learning_rate": 3.835139018328674e-05,
      "loss": 0.4075,
      "step": 12000
    },
    {
      "epoch": 0.7280987884436161,
      "grad_norm": 7.146569728851318,
      "learning_rate": 3.786599099099099e-05,
      "loss": 0.4101,
      "step": 12500
    },
    {
      "epoch": 0.7572227399813607,
      "grad_norm": 4.266728401184082,
      "learning_rate": 3.7380591798695245e-05,
      "loss": 0.4121,
      "step": 13000
    },
    {
      "epoch": 0.7863466915191053,
      "grad_norm": 6.878367900848389,
      "learning_rate": 3.68951926063995e-05,
      "loss": 0.3978,
      "step": 13500
    },
    {
      "epoch": 0.8154706430568499,
      "grad_norm": 5.727404594421387,
      "learning_rate": 3.640979341410376e-05,
      "loss": 0.3952,
      "step": 14000
    },
    {
      "epoch": 0.8445945945945946,
      "grad_norm": 9.332341194152832,
      "learning_rate": 3.592439422180802e-05,
      "loss": 0.3939,
      "step": 14500
    },
    {
      "epoch": 0.8737185461323392,
      "grad_norm": 2.863827705383301,
      "learning_rate": 3.543899502951227e-05,
      "loss": 0.3988,
      "step": 15000
    },
    {
      "epoch": 0.9028424976700838,
      "grad_norm": 3.3920953273773193,
      "learning_rate": 3.495359583721653e-05,
      "loss": 0.3908,
      "step": 15500
    },
    {
      "epoch": 0.9319664492078286,
      "grad_norm": 4.0478596687316895,
      "learning_rate": 3.446819664492079e-05,
      "loss": 0.3984,
      "step": 16000
    },
    {
      "epoch": 0.9610904007455732,
      "grad_norm": 5.245906829833984,
      "learning_rate": 3.398279745262504e-05,
      "loss": 0.3934,
      "step": 16500
    },
    {
      "epoch": 0.9902143522833178,
      "grad_norm": 7.446463108062744,
      "learning_rate": 3.34973982603293e-05,
      "loss": 0.3835,
      "step": 17000
    },
    {
      "epoch": 1.0193383038210624,
      "grad_norm": 5.9738311767578125,
      "learning_rate": 3.301199906803355e-05,
      "loss": 0.3593,
      "step": 17500
    },
    {
      "epoch": 1.048462255358807,
      "grad_norm": 4.593014717102051,
      "learning_rate": 3.252659987573781e-05,
      "loss": 0.3524,
      "step": 18000
    },
    {
      "epoch": 1.0775862068965518,
      "grad_norm": 5.643582344055176,
      "learning_rate": 3.204120068344206e-05,
      "loss": 0.3387,
      "step": 18500
    },
    {
      "epoch": 1.1067101584342964,
      "grad_norm": 6.060087203979492,
      "learning_rate": 3.155580149114632e-05,
      "loss": 0.3532,
      "step": 19000
    },
    {
      "epoch": 1.135834109972041,
      "grad_norm": 11.5077543258667,
      "learning_rate": 3.1070402298850576e-05,
      "loss": 0.3469,
      "step": 19500
    },
    {
      "epoch": 1.1649580615097856,
      "grad_norm": 6.907314300537109,
      "learning_rate": 3.0585003106554835e-05,
      "loss": 0.3606,
      "step": 20000
    },
    {
      "epoch": 1.1940820130475303,
      "grad_norm": 6.096247673034668,
      "learning_rate": 3.009960391425909e-05,
      "loss": 0.3573,
      "step": 20500
    },
    {
      "epoch": 1.2232059645852749,
      "grad_norm": 5.199579238891602,
      "learning_rate": 2.9614204721963345e-05,
      "loss": 0.3481,
      "step": 21000
    },
    {
      "epoch": 1.2523299161230197,
      "grad_norm": 6.14545202255249,
      "learning_rate": 2.9128805529667603e-05,
      "loss": 0.3425,
      "step": 21500
    },
    {
      "epoch": 1.281453867660764,
      "grad_norm": 4.431536674499512,
      "learning_rate": 2.8643406337371858e-05,
      "loss": 0.3418,
      "step": 22000
    },
    {
      "epoch": 1.310577819198509,
      "grad_norm": 5.145776748657227,
      "learning_rate": 2.815800714507611e-05,
      "loss": 0.3462,
      "step": 22500
    },
    {
      "epoch": 1.3397017707362535,
      "grad_norm": 3.804363965988159,
      "learning_rate": 2.7672607952780365e-05,
      "loss": 0.3461,
      "step": 23000
    },
    {
      "epoch": 1.3688257222739981,
      "grad_norm": 5.607990741729736,
      "learning_rate": 2.7187208760484623e-05,
      "loss": 0.3415,
      "step": 23500
    },
    {
      "epoch": 1.3979496738117427,
      "grad_norm": 4.164012432098389,
      "learning_rate": 2.6701809568188878e-05,
      "loss": 0.3442,
      "step": 24000
    },
    {
      "epoch": 1.4270736253494873,
      "grad_norm": 5.769927978515625,
      "learning_rate": 2.6216410375893136e-05,
      "loss": 0.3409,
      "step": 24500
    },
    {
      "epoch": 1.4561975768872322,
      "grad_norm": 6.918564796447754,
      "learning_rate": 2.573101118359739e-05,
      "loss": 0.338,
      "step": 25000
    },
    {
      "epoch": 1.4853215284249768,
      "grad_norm": 11.89932918548584,
      "learning_rate": 2.524561199130165e-05,
      "loss": 0.329,
      "step": 25500
    },
    {
      "epoch": 1.5144454799627214,
      "grad_norm": 6.199171543121338,
      "learning_rate": 2.47602127990059e-05,
      "loss": 0.3511,
      "step": 26000
    },
    {
      "epoch": 1.543569431500466,
      "grad_norm": 4.363777160644531,
      "learning_rate": 2.427481360671016e-05,
      "loss": 0.3396,
      "step": 26500
    },
    {
      "epoch": 1.5726933830382106,
      "grad_norm": 3.2776997089385986,
      "learning_rate": 2.3789414414414415e-05,
      "loss": 0.3386,
      "step": 27000
    },
    {
      "epoch": 1.6018173345759554,
      "grad_norm": 5.156482696533203,
      "learning_rate": 2.3304015222118673e-05,
      "loss": 0.3387,
      "step": 27500
    },
    {
      "epoch": 1.6309412861136998,
      "grad_norm": 4.603842735290527,
      "learning_rate": 2.2818616029822928e-05,
      "loss": 0.3441,
      "step": 28000
    },
    {
      "epoch": 1.6600652376514446,
      "grad_norm": 3.997964382171631,
      "learning_rate": 2.2333216837527183e-05,
      "loss": 0.3396,
      "step": 28500
    },
    {
      "epoch": 1.689189189189189,
      "grad_norm": 6.630443572998047,
      "learning_rate": 2.1847817645231438e-05,
      "loss": 0.3382,
      "step": 29000
    },
    {
      "epoch": 1.7183131407269339,
      "grad_norm": 4.180856704711914,
      "learning_rate": 2.1362418452935696e-05,
      "loss": 0.3411,
      "step": 29500
    },
    {
      "epoch": 1.7474370922646785,
      "grad_norm": 4.533492565155029,
      "learning_rate": 2.087701926063995e-05,
      "loss": 0.3363,
      "step": 30000
    },
    {
      "epoch": 1.776561043802423,
      "grad_norm": 5.366372585296631,
      "learning_rate": 2.039162006834421e-05,
      "loss": 0.3368,
      "step": 30500
    },
    {
      "epoch": 1.805684995340168,
      "grad_norm": 4.742730617523193,
      "learning_rate": 1.990622087604846e-05,
      "loss": 0.3315,
      "step": 31000
    },
    {
      "epoch": 1.8348089468779123,
      "grad_norm": 4.7824859619140625,
      "learning_rate": 1.942082168375272e-05,
      "loss": 0.3367,
      "step": 31500
    },
    {
      "epoch": 1.8639328984156571,
      "grad_norm": 3.871854782104492,
      "learning_rate": 1.8935422491456975e-05,
      "loss": 0.3267,
      "step": 32000
    },
    {
      "epoch": 1.8930568499534017,
      "grad_norm": 3.5342345237731934,
      "learning_rate": 1.8450023299161233e-05,
      "loss": 0.3329,
      "step": 32500
    },
    {
      "epoch": 1.9221808014911463,
      "grad_norm": 6.764543056488037,
      "learning_rate": 1.7964624106865488e-05,
      "loss": 0.3269,
      "step": 33000
    },
    {
      "epoch": 1.951304753028891,
      "grad_norm": 3.221038341522217,
      "learning_rate": 1.7479224914569743e-05,
      "loss": 0.3264,
      "step": 33500
    },
    {
      "epoch": 1.9804287045666356,
      "grad_norm": 7.037004470825195,
      "learning_rate": 1.6993825722273998e-05,
      "loss": 0.339,
      "step": 34000
    },
    {
      "epoch": 2.0095526561043804,
      "grad_norm": 6.426731109619141,
      "learning_rate": 1.6508426529978256e-05,
      "loss": 0.3099,
      "step": 34500
    },
    {
      "epoch": 2.0386766076421248,
      "grad_norm": 9.10340404510498,
      "learning_rate": 1.602302733768251e-05,
      "loss": 0.2947,
      "step": 35000
    },
    {
      "epoch": 2.0678005591798696,
      "grad_norm": 5.635839462280273,
      "learning_rate": 1.5537628145386766e-05,
      "loss": 0.2866,
      "step": 35500
    },
    {
      "epoch": 2.096924510717614,
      "grad_norm": 7.611632823944092,
      "learning_rate": 1.5052228953091021e-05,
      "loss": 0.2834,
      "step": 36000
    },
    {
      "epoch": 2.126048462255359,
      "grad_norm": 3.599088668823242,
      "learning_rate": 1.4566829760795278e-05,
      "loss": 0.2973,
      "step": 36500
    },
    {
      "epoch": 2.1551724137931036,
      "grad_norm": 5.531040668487549,
      "learning_rate": 1.4081430568499535e-05,
      "loss": 0.2997,
      "step": 37000
    },
    {
      "epoch": 2.184296365330848,
      "grad_norm": 3.645749568939209,
      "learning_rate": 1.3596031376203791e-05,
      "loss": 0.2921,
      "step": 37500
    },
    {
      "epoch": 2.213420316868593,
      "grad_norm": 3.7981762886047363,
      "learning_rate": 1.3110632183908048e-05,
      "loss": 0.2949,
      "step": 38000
    },
    {
      "epoch": 2.2425442684063372,
      "grad_norm": 6.629352569580078,
      "learning_rate": 1.2625232991612301e-05,
      "loss": 0.3025,
      "step": 38500
    },
    {
      "epoch": 2.271668219944082,
      "grad_norm": 5.726624011993408,
      "learning_rate": 1.2139833799316558e-05,
      "loss": 0.291,
      "step": 39000
    },
    {
      "epoch": 2.3007921714818265,
      "grad_norm": 6.404401779174805,
      "learning_rate": 1.1654434607020815e-05,
      "loss": 0.2903,
      "step": 39500
    },
    {
      "epoch": 2.3299161230195713,
      "grad_norm": 5.85780143737793,
      "learning_rate": 1.1169035414725071e-05,
      "loss": 0.2929,
      "step": 40000
    },
    {
      "epoch": 2.359040074557316,
      "grad_norm": 5.6245856285095215,
      "learning_rate": 1.0683636222429326e-05,
      "loss": 0.2831,
      "step": 40500
    },
    {
      "epoch": 2.3881640260950605,
      "grad_norm": 7.399583339691162,
      "learning_rate": 1.0198237030133583e-05,
      "loss": 0.2967,
      "step": 41000
    },
    {
      "epoch": 2.4172879776328053,
      "grad_norm": 4.536987781524658,
      "learning_rate": 9.712837837837838e-06,
      "loss": 0.29,
      "step": 41500
    },
    {
      "epoch": 2.4464119291705497,
      "grad_norm": 7.220358371734619,
      "learning_rate": 9.227438645542095e-06,
      "loss": 0.287,
      "step": 42000
    },
    {
      "epoch": 2.4755358807082946,
      "grad_norm": 8.658233642578125,
      "learning_rate": 8.742039453246351e-06,
      "loss": 0.2841,
      "step": 42500
    },
    {
      "epoch": 2.5046598322460394,
      "grad_norm": 4.1782732009887695,
      "learning_rate": 8.256640260950606e-06,
      "loss": 0.2864,
      "step": 43000
    },
    {
      "epoch": 2.5337837837837838,
      "grad_norm": 6.624438285827637,
      "learning_rate": 7.771241068654863e-06,
      "loss": 0.2899,
      "step": 43500
    },
    {
      "epoch": 2.562907735321528,
      "grad_norm": 4.993212699890137,
      "learning_rate": 7.285841876359117e-06,
      "loss": 0.2908,
      "step": 44000
    },
    {
      "epoch": 2.592031686859273,
      "grad_norm": 4.049441337585449,
      "learning_rate": 6.800442684063374e-06,
      "loss": 0.2866,
      "step": 44500
    },
    {
      "epoch": 2.621155638397018,
      "grad_norm": 7.236120700836182,
      "learning_rate": 6.31504349176763e-06,
      "loss": 0.2883,
      "step": 45000
    },
    {
      "epoch": 2.650279589934762,
      "grad_norm": 3.532236099243164,
      "learning_rate": 5.829644299471886e-06,
      "loss": 0.2864,
      "step": 45500
    },
    {
      "epoch": 2.679403541472507,
      "grad_norm": 6.296010494232178,
      "learning_rate": 5.344245107176142e-06,
      "loss": 0.2917,
      "step": 46000
    },
    {
      "epoch": 2.7085274930102514,
      "grad_norm": 5.174038410186768,
      "learning_rate": 4.858845914880398e-06,
      "loss": 0.2761,
      "step": 46500
    },
    {
      "epoch": 2.7376514445479962,
      "grad_norm": 5.052979946136475,
      "learning_rate": 4.373446722584654e-06,
      "loss": 0.2812,
      "step": 47000
    },
    {
      "epoch": 2.766775396085741,
      "grad_norm": 11.780526161193848,
      "learning_rate": 3.8880475302889095e-06,
      "loss": 0.2794,
      "step": 47500
    },
    {
      "epoch": 2.7958993476234855,
      "grad_norm": 6.626284122467041,
      "learning_rate": 3.402648337993166e-06,
      "loss": 0.2868,
      "step": 48000
    },
    {
      "epoch": 2.8250232991612303,
      "grad_norm": 5.827520370483398,
      "learning_rate": 2.917249145697422e-06,
      "loss": 0.2874,
      "step": 48500
    },
    {
      "epoch": 2.8541472506989747,
      "grad_norm": 5.66077995300293,
      "learning_rate": 2.431849953401678e-06,
      "loss": 0.2873,
      "step": 49000
    },
    {
      "epoch": 2.8832712022367195,
      "grad_norm": 5.199749946594238,
      "learning_rate": 1.9464507611059337e-06,
      "loss": 0.2808,
      "step": 49500
    },
    {
      "epoch": 2.9123951537744643,
      "grad_norm": 6.272824764251709,
      "learning_rate": 1.4610515688101895e-06,
      "loss": 0.2904,
      "step": 50000
    },
    {
      "epoch": 2.9415191053122087,
      "grad_norm": 4.418920993804932,
      "learning_rate": 9.756523765144455e-07,
      "loss": 0.2883,
      "step": 50500
    },
    {
      "epoch": 2.9706430568499536,
      "grad_norm": 6.885050296783447,
      "learning_rate": 4.902531842187015e-07,
      "loss": 0.2816,
      "step": 51000
    },
    {
      "epoch": 2.999767008387698,
      "grad_norm": 5.115917205810547,
      "learning_rate": 4.85399192295744e-09,
      "loss": 0.2863,
      "step": 51500
    }
  ],
  "logging_steps": 500,
  "max_steps": 51504,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2121960164297984e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}