{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 51504,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.029123951537744643,
      "grad_norm": 6.774685382843018,
      "learning_rate": 4.951557160608885e-05,
      "loss": 0.8291,
      "step": 500
    },
    {
      "epoch": 0.058247903075489285,
      "grad_norm": 6.7878875732421875,
      "learning_rate": 4.9030172413793105e-05,
      "loss": 0.6161,
      "step": 1000
    },
    {
      "epoch": 0.08737185461323392,
      "grad_norm": 4.789400577545166,
      "learning_rate": 4.854477322149736e-05,
      "loss": 0.5785,
      "step": 1500
    },
    {
      "epoch": 0.11649580615097857,
      "grad_norm": 8.038426399230957,
      "learning_rate": 4.805937402920162e-05,
      "loss": 0.546,
      "step": 2000
    },
    {
      "epoch": 0.1456197576887232,
      "grad_norm": 5.627825736999512,
      "learning_rate": 4.757397483690587e-05,
      "loss": 0.5363,
      "step": 2500
    },
    {
      "epoch": 0.17474370922646784,
      "grad_norm": 4.209435939788818,
      "learning_rate": 4.7088575644610125e-05,
      "loss": 0.5225,
      "step": 3000
    },
    {
      "epoch": 0.20386766076421248,
      "grad_norm": 6.513331890106201,
      "learning_rate": 4.660317645231438e-05,
      "loss": 0.5104,
      "step": 3500
    },
    {
      "epoch": 0.23299161230195714,
      "grad_norm": 8.145315170288086,
      "learning_rate": 4.611777726001864e-05,
      "loss": 0.4894,
      "step": 4000
    },
    {
      "epoch": 0.26211556383970175,
      "grad_norm": 5.720129489898682,
      "learning_rate": 4.56323780677229e-05,
      "loss": 0.483,
      "step": 4500
    },
    {
      "epoch": 0.2912395153774464,
      "grad_norm": 4.3693718910217285,
      "learning_rate": 4.514697887542715e-05,
      "loss": 0.4747,
      "step": 5000
    },
    {
      "epoch": 0.3203634669151911,
      "grad_norm": 5.498645305633545,
      "learning_rate": 4.466157968313141e-05,
      "loss": 0.4686,
      "step": 5500
    },
    {
      "epoch": 0.3494874184529357,
      "grad_norm": 4.044460296630859,
      "learning_rate": 4.417618049083567e-05,
      "loss": 0.4679,
      "step": 6000
    },
    {
      "epoch": 0.37861136999068035,
      "grad_norm": 6.850383758544922,
      "learning_rate": 4.369078129853993e-05,
      "loss": 0.4632,
      "step": 6500
    },
    {
      "epoch": 0.40773532152842495,
      "grad_norm": 6.392009258270264,
      "learning_rate": 4.320538210624418e-05,
      "loss": 0.4521,
      "step": 7000
    },
    {
      "epoch": 0.4368592730661696,
      "grad_norm": 4.33333683013916,
      "learning_rate": 4.271998291394843e-05,
      "loss": 0.447,
      "step": 7500
    },
    {
      "epoch": 0.4659832246039143,
      "grad_norm": 3.611548662185669,
      "learning_rate": 4.223458372165269e-05,
      "loss": 0.4465,
      "step": 8000
    },
    {
      "epoch": 0.4951071761416589,
      "grad_norm": 5.756487846374512,
      "learning_rate": 4.1749184529356947e-05,
      "loss": 0.4354,
      "step": 8500
    },
    {
      "epoch": 0.5242311276794035,
      "grad_norm": 5.3887786865234375,
      "learning_rate": 4.12637853370612e-05,
      "loss": 0.4395,
      "step": 9000
    },
    {
      "epoch": 0.5533550792171482,
      "grad_norm": 4.947877883911133,
      "learning_rate": 4.0778386144765457e-05,
      "loss": 0.4431,
      "step": 9500
    },
    {
      "epoch": 0.5824790307548928,
      "grad_norm": 6.60713529586792,
      "learning_rate": 4.0292986952469715e-05,
      "loss": 0.4355,
      "step": 10000
    },
    {
      "epoch": 0.6116029822926374,
      "grad_norm": 4.627679347991943,
      "learning_rate": 3.980758776017397e-05,
      "loss": 0.4317,
      "step": 10500
    },
    {
      "epoch": 0.6407269338303822,
      "grad_norm": 4.922262668609619,
      "learning_rate": 3.9322188567878225e-05,
      "loss": 0.4247,
      "step": 11000
    },
    {
      "epoch": 0.6698508853681268,
      "grad_norm": 3.914152145385742,
      "learning_rate": 3.883678937558248e-05,
      "loss": 0.4334,
      "step": 11500
    },
    {
      "epoch": 0.6989748369058714,
      "grad_norm": 4.864943027496338,
      "learning_rate": 3.835139018328674e-05,
      "loss": 0.4185,
      "step": 12000
    },
    {
      "epoch": 0.7280987884436161,
      "grad_norm": 10.3380708694458,
      "learning_rate": 3.786599099099099e-05,
      "loss": 0.4223,
      "step": 12500
    },
    {
      "epoch": 0.7572227399813607,
      "grad_norm": 4.065073490142822,
      "learning_rate": 3.7380591798695245e-05,
      "loss": 0.4086,
      "step": 13000
    },
    {
      "epoch": 0.7863466915191053,
      "grad_norm": 4.190847873687744,
      "learning_rate": 3.68951926063995e-05,
      "loss": 0.4156,
      "step": 13500
    },
    {
      "epoch": 0.8154706430568499,
      "grad_norm": 5.804997444152832,
      "learning_rate": 3.640979341410376e-05,
      "loss": 0.4052,
      "step": 14000
    },
    {
      "epoch": 0.8445945945945946,
      "grad_norm": 7.701807975769043,
      "learning_rate": 3.592439422180802e-05,
      "loss": 0.4133,
      "step": 14500
    },
    {
      "epoch": 0.8737185461323392,
      "grad_norm": 17.949703216552734,
      "learning_rate": 3.543899502951227e-05,
      "loss": 0.4076,
      "step": 15000
    },
    {
      "epoch": 0.9028424976700838,
      "grad_norm": 5.222975254058838,
      "learning_rate": 3.495359583721653e-05,
      "loss": 0.4201,
      "step": 15500
    },
    {
      "epoch": 0.9319664492078286,
      "grad_norm": 4.896594047546387,
      "learning_rate": 3.446819664492079e-05,
      "loss": 0.4035,
      "step": 16000
    },
    {
      "epoch": 0.9610904007455732,
      "grad_norm": 5.599170207977295,
      "learning_rate": 3.398279745262504e-05,
      "loss": 0.4065,
      "step": 16500
    },
    {
      "epoch": 0.9902143522833178,
      "grad_norm": 3.4558660984039307,
      "learning_rate": 3.34973982603293e-05,
      "loss": 0.413,
      "step": 17000
    },
    {
      "epoch": 1.0193383038210624,
      "grad_norm": 6.404013156890869,
      "learning_rate": 3.301199906803355e-05,
      "loss": 0.3758,
      "step": 17500
    },
    {
      "epoch": 1.048462255358807,
      "grad_norm": 5.1876420974731445,
      "learning_rate": 3.252659987573781e-05,
      "loss": 0.3567,
      "step": 18000
    },
    {
      "epoch": 1.0775862068965518,
      "grad_norm": 3.246553421020508,
      "learning_rate": 3.204120068344206e-05,
      "loss": 0.362,
      "step": 18500
    },
    {
      "epoch": 1.1067101584342964,
      "grad_norm": 5.665416240692139,
      "learning_rate": 3.155580149114632e-05,
      "loss": 0.3639,
      "step": 19000
    },
    {
      "epoch": 1.135834109972041,
      "grad_norm": 4.938392162322998,
      "learning_rate": 3.1070402298850576e-05,
      "loss": 0.3649,
      "step": 19500
    },
    {
      "epoch": 1.1649580615097856,
      "grad_norm": 4.021746635437012,
      "learning_rate": 3.0585003106554835e-05,
      "loss": 0.3688,
      "step": 20000
    },
    {
      "epoch": 1.1940820130475303,
      "grad_norm": 5.639608860015869,
      "learning_rate": 3.009960391425909e-05,
      "loss": 0.357,
      "step": 20500
    },
    {
      "epoch": 1.2232059645852749,
      "grad_norm": 5.954344272613525,
      "learning_rate": 2.9614204721963345e-05,
      "loss": 0.3614,
      "step": 21000
    },
    {
      "epoch": 1.2523299161230197,
      "grad_norm": 4.498464107513428,
      "learning_rate": 2.9128805529667603e-05,
      "loss": 0.3592,
      "step": 21500
    },
    {
      "epoch": 1.281453867660764,
      "grad_norm": 7.547128200531006,
      "learning_rate": 2.8643406337371858e-05,
      "loss": 0.3597,
      "step": 22000
    },
    {
      "epoch": 1.310577819198509,
      "grad_norm": 6.078393459320068,
      "learning_rate": 2.815800714507611e-05,
      "loss": 0.3643,
      "step": 22500
    },
    {
      "epoch": 1.3397017707362535,
      "grad_norm": 5.5143303871154785,
      "learning_rate": 2.7672607952780365e-05,
      "loss": 0.359,
      "step": 23000
    },
    {
      "epoch": 1.3688257222739981,
      "grad_norm": 5.585659027099609,
      "learning_rate": 2.7187208760484623e-05,
      "loss": 0.3647,
      "step": 23500
    },
    {
      "epoch": 1.3979496738117427,
      "grad_norm": 6.159289836883545,
      "learning_rate": 2.6701809568188878e-05,
      "loss": 0.3525,
      "step": 24000
    },
    {
      "epoch": 1.4270736253494873,
      "grad_norm": 4.410373687744141,
      "learning_rate": 2.6216410375893136e-05,
      "loss": 0.3518,
      "step": 24500
    },
    {
      "epoch": 1.4561975768872322,
      "grad_norm": 4.7880754470825195,
      "learning_rate": 2.573101118359739e-05,
      "loss": 0.3568,
      "step": 25000
    },
    {
      "epoch": 1.4853215284249768,
      "grad_norm": 4.985711097717285,
      "learning_rate": 2.524561199130165e-05,
      "loss": 0.3566,
      "step": 25500
    },
    {
      "epoch": 1.5144454799627214,
      "grad_norm": 7.0648088455200195,
      "learning_rate": 2.47602127990059e-05,
      "loss": 0.3634,
      "step": 26000
    },
    {
      "epoch": 1.543569431500466,
      "grad_norm": 5.251628875732422,
      "learning_rate": 2.427481360671016e-05,
      "loss": 0.3526,
      "step": 26500
    },
    {
      "epoch": 1.5726933830382106,
      "grad_norm": 8.0952787399292,
      "learning_rate": 2.3789414414414415e-05,
      "loss": 0.3489,
      "step": 27000
    },
    {
      "epoch": 1.6018173345759554,
      "grad_norm": 3.6345725059509277,
      "learning_rate": 2.3304015222118673e-05,
      "loss": 0.3591,
      "step": 27500
    },
    {
      "epoch": 1.6309412861136998,
      "grad_norm": 6.777653217315674,
      "learning_rate": 2.2818616029822928e-05,
      "loss": 0.3566,
      "step": 28000
    },
    {
      "epoch": 1.6600652376514446,
      "grad_norm": 5.334507465362549,
      "learning_rate": 2.2333216837527183e-05,
      "loss": 0.3506,
      "step": 28500
    },
    {
      "epoch": 1.689189189189189,
      "grad_norm": 5.431693077087402,
      "learning_rate": 2.1847817645231438e-05,
      "loss": 0.358,
      "step": 29000
    },
    {
      "epoch": 1.7183131407269339,
      "grad_norm": 8.189932823181152,
      "learning_rate": 2.1362418452935696e-05,
      "loss": 0.3535,
      "step": 29500
    },
    {
      "epoch": 1.7474370922646785,
      "grad_norm": 6.549941062927246,
      "learning_rate": 2.087701926063995e-05,
      "loss": 0.3502,
      "step": 30000
    },
    {
      "epoch": 1.776561043802423,
      "grad_norm": 5.226093292236328,
      "learning_rate": 2.039162006834421e-05,
      "loss": 0.3509,
      "step": 30500
    },
    {
      "epoch": 1.805684995340168,
      "grad_norm": 4.530932426452637,
      "learning_rate": 1.990622087604846e-05,
      "loss": 0.3439,
      "step": 31000
    },
    {
      "epoch": 1.8348089468779123,
      "grad_norm": 2.6704397201538086,
      "learning_rate": 1.942082168375272e-05,
      "loss": 0.3417,
      "step": 31500
    },
    {
      "epoch": 1.8639328984156571,
      "grad_norm": 4.931678295135498,
      "learning_rate": 1.8935422491456975e-05,
      "loss": 0.3489,
      "step": 32000
    },
    {
      "epoch": 1.8930568499534017,
      "grad_norm": 3.0061357021331787,
      "learning_rate": 1.8450023299161233e-05,
      "loss": 0.3478,
      "step": 32500
    },
    {
      "epoch": 1.9221808014911463,
      "grad_norm": 5.605920314788818,
      "learning_rate": 1.7964624106865488e-05,
      "loss": 0.3469,
      "step": 33000
    },
    {
      "epoch": 1.951304753028891,
      "grad_norm": 8.776355743408203,
      "learning_rate": 1.7479224914569743e-05,
      "loss": 0.3479,
      "step": 33500
    },
    {
      "epoch": 1.9804287045666356,
      "grad_norm": 6.094631671905518,
      "learning_rate": 1.6993825722273998e-05,
      "loss": 0.3465,
      "step": 34000
    },
    {
      "epoch": 2.0095526561043804,
      "grad_norm": 3.9695520401000977,
      "learning_rate": 1.6508426529978256e-05,
      "loss": 0.337,
      "step": 34500
    },
    {
      "epoch": 2.0386766076421248,
      "grad_norm": 9.629318237304688,
      "learning_rate": 1.602302733768251e-05,
      "loss": 0.2991,
      "step": 35000
    },
    {
      "epoch": 2.0678005591798696,
      "grad_norm": 5.794491767883301,
      "learning_rate": 1.5537628145386766e-05,
      "loss": 0.3049,
      "step": 35500
    },
    {
      "epoch": 2.096924510717614,
      "grad_norm": 3.9909379482269287,
      "learning_rate": 1.5052228953091021e-05,
      "loss": 0.307,
      "step": 36000
    },
    {
      "epoch": 2.126048462255359,
      "grad_norm": 5.097212314605713,
      "learning_rate": 1.4566829760795278e-05,
      "loss": 0.3108,
      "step": 36500
    },
    {
      "epoch": 2.1551724137931036,
      "grad_norm": 4.131860733032227,
      "learning_rate": 1.4081430568499535e-05,
      "loss": 0.308,
      "step": 37000
    },
    {
      "epoch": 2.184296365330848,
      "grad_norm": 5.7827372550964355,
      "learning_rate": 1.3596031376203791e-05,
      "loss": 0.3022,
      "step": 37500
    },
    {
      "epoch": 2.213420316868593,
      "grad_norm": 5.650452613830566,
      "learning_rate": 1.3110632183908048e-05,
      "loss": 0.3084,
      "step": 38000
    },
    {
      "epoch": 2.2425442684063372,
      "grad_norm": 7.3451151847839355,
      "learning_rate": 1.2625232991612301e-05,
      "loss": 0.2974,
      "step": 38500
    },
    {
      "epoch": 2.271668219944082,
      "grad_norm": 6.839465141296387,
      "learning_rate": 1.2139833799316558e-05,
      "loss": 0.2978,
      "step": 39000
    },
    {
      "epoch": 2.3007921714818265,
      "grad_norm": 3.4287917613983154,
      "learning_rate": 1.1654434607020815e-05,
      "loss": 0.3137,
      "step": 39500
    },
    {
      "epoch": 2.3299161230195713,
      "grad_norm": 7.438332557678223,
      "learning_rate": 1.1169035414725071e-05,
      "loss": 0.3032,
      "step": 40000
    },
    {
      "epoch": 2.359040074557316,
      "grad_norm": 4.485251426696777,
      "learning_rate": 1.0683636222429326e-05,
      "loss": 0.31,
      "step": 40500
    },
    {
      "epoch": 2.3881640260950605,
      "grad_norm": 4.613361835479736,
      "learning_rate": 1.0198237030133583e-05,
      "loss": 0.3076,
      "step": 41000
    },
    {
      "epoch": 2.4172879776328053,
      "grad_norm": 3.4650862216949463,
      "learning_rate": 9.712837837837838e-06,
      "loss": 0.2925,
      "step": 41500
    },
    {
      "epoch": 2.4464119291705497,
      "grad_norm": 2.9402899742126465,
      "learning_rate": 9.227438645542095e-06,
      "loss": 0.3019,
      "step": 42000
    },
    {
      "epoch": 2.4755358807082946,
      "grad_norm": 3.6428680419921875,
      "learning_rate": 8.742039453246351e-06,
      "loss": 0.3148,
      "step": 42500
    },
    {
      "epoch": 2.5046598322460394,
      "grad_norm": 4.835502624511719,
      "learning_rate": 8.256640260950606e-06,
      "loss": 0.2923,
      "step": 43000
    },
    {
      "epoch": 2.5337837837837838,
      "grad_norm": 6.484007835388184,
      "learning_rate": 7.771241068654863e-06,
      "loss": 0.2889,
      "step": 43500
    },
    {
      "epoch": 2.562907735321528,
      "grad_norm": 7.5975565910339355,
      "learning_rate": 7.285841876359117e-06,
      "loss": 0.2909,
      "step": 44000
    },
    {
      "epoch": 2.592031686859273,
      "grad_norm": 5.18363094329834,
      "learning_rate": 6.800442684063374e-06,
      "loss": 0.3019,
      "step": 44500
    },
    {
      "epoch": 2.621155638397018,
      "grad_norm": 9.518139839172363,
      "learning_rate": 6.31504349176763e-06,
      "loss": 0.3029,
      "step": 45000
    },
    {
      "epoch": 2.650279589934762,
      "grad_norm": 3.660771369934082,
      "learning_rate": 5.829644299471886e-06,
      "loss": 0.301,
      "step": 45500
    },
    {
      "epoch": 2.679403541472507,
      "grad_norm": 6.393496036529541,
      "learning_rate": 5.344245107176142e-06,
      "loss": 0.3006,
      "step": 46000
    },
    {
      "epoch": 2.7085274930102514,
      "grad_norm": 5.712442398071289,
      "learning_rate": 4.858845914880398e-06,
      "loss": 0.315,
      "step": 46500
    },
    {
      "epoch": 2.7376514445479962,
      "grad_norm": 7.901050090789795,
      "learning_rate": 4.373446722584654e-06,
      "loss": 0.3021,
      "step": 47000
    },
    {
      "epoch": 2.766775396085741,
      "grad_norm": 6.925070762634277,
      "learning_rate": 3.8880475302889095e-06,
      "loss": 0.2989,
      "step": 47500
    },
    {
      "epoch": 2.7958993476234855,
      "grad_norm": 2.2326674461364746,
      "learning_rate": 3.402648337993166e-06,
      "loss": 0.3024,
      "step": 48000
    },
    {
      "epoch": 2.8250232991612303,
      "grad_norm": 7.3733320236206055,
      "learning_rate": 2.917249145697422e-06,
      "loss": 0.3078,
      "step": 48500
    },
    {
      "epoch": 2.8541472506989747,
      "grad_norm": 5.280145645141602,
      "learning_rate": 2.431849953401678e-06,
      "loss": 0.2995,
      "step": 49000
    },
    {
      "epoch": 2.8832712022367195,
      "grad_norm": 5.809254169464111,
      "learning_rate": 1.9464507611059337e-06,
      "loss": 0.3008,
      "step": 49500
    },
    {
      "epoch": 2.9123951537744643,
      "grad_norm": 6.8635430335998535,
      "learning_rate": 1.4610515688101895e-06,
      "loss": 0.2995,
      "step": 50000
    },
    {
      "epoch": 2.9415191053122087,
      "grad_norm": 6.390039920806885,
      "learning_rate": 9.756523765144455e-07,
      "loss": 0.301,
      "step": 50500
    },
    {
      "epoch": 2.9706430568499536,
      "grad_norm": 7.5721964836120605,
      "learning_rate": 4.902531842187015e-07,
      "loss": 0.305,
      "step": 51000
    },
    {
      "epoch": 2.999767008387698,
      "grad_norm": 6.955125331878662,
      "learning_rate": 4.85399192295744e-09,
      "loss": 0.3002,
      "step": 51500
    }
  ],
  "logging_steps": 500,
  "max_steps": 51504,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2121938098975232e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}