{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 96,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03125,
      "grad_norm": 0.016425726780162,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.1769,
      "step": 1
    },
    {
      "epoch": 0.0625,
      "grad_norm": 0.013231725110253372,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.1792,
      "step": 2
    },
    {
      "epoch": 0.09375,
      "grad_norm": 0.013103518070364825,
      "learning_rate": 2.4e-05,
      "loss": 0.1916,
      "step": 3
    },
    {
      "epoch": 0.125,
      "grad_norm": 0.014507073768722529,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.2018,
      "step": 4
    },
    {
      "epoch": 0.15625,
      "grad_norm": 0.013930637442297231,
      "learning_rate": 4e-05,
      "loss": 0.1795,
      "step": 5
    },
    {
      "epoch": 0.1875,
      "grad_norm": 0.016689395846806662,
      "learning_rate": 4.8e-05,
      "loss": 0.1899,
      "step": 6
    },
    {
      "epoch": 0.21875,
      "grad_norm": 0.019014765666609475,
      "learning_rate": 5.6e-05,
      "loss": 0.1843,
      "step": 7
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.021235355847763007,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.2,
      "step": 8
    },
    {
      "epoch": 0.28125,
      "grad_norm": 0.021428572165167116,
      "learning_rate": 7.2e-05,
      "loss": 0.1907,
      "step": 9
    },
    {
      "epoch": 0.3125,
      "grad_norm": 0.022746036544687907,
      "learning_rate": 8e-05,
      "loss": 0.1749,
      "step": 10
    },
    {
      "epoch": 0.34375,
      "grad_norm": 0.015660379977915136,
      "learning_rate": 7.997331393480957e-05,
      "loss": 0.1535,
      "step": 11
    },
    {
      "epoch": 0.375,
      "grad_norm": 0.01417217686690509,
      "learning_rate": 7.989329134654207e-05,
      "loss": 0.1653,
      "step": 12
    },
    {
      "epoch": 0.40625,
      "grad_norm": 0.017727320057087954,
      "learning_rate": 7.976003900959785e-05,
      "loss": 0.1598,
      "step": 13
    },
    {
      "epoch": 0.4375,
      "grad_norm": 0.02773697497548891,
      "learning_rate": 7.957373472300442e-05,
      "loss": 0.1574,
      "step": 14
    },
    {
      "epoch": 0.46875,
      "grad_norm": 0.03628053020995818,
      "learning_rate": 7.933462707317864e-05,
      "loss": 0.1499,
      "step": 15
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.022331053579954328,
      "learning_rate": 7.90430351022371e-05,
      "loss": 0.1519,
      "step": 16
    },
    {
      "epoch": 0.53125,
      "grad_norm": 0.01941017254525756,
      "learning_rate": 7.869934788229701e-05,
      "loss": 0.1561,
      "step": 17
    },
    {
      "epoch": 0.5625,
      "grad_norm": 0.010903230093302288,
      "learning_rate": 7.830402399633624e-05,
      "loss": 0.1428,
      "step": 18
    },
    {
      "epoch": 0.59375,
      "grad_norm": 0.011448294757974464,
      "learning_rate": 7.785759092630437e-05,
      "loss": 0.1544,
      "step": 19
    },
    {
      "epoch": 0.625,
      "grad_norm": 0.01535786310963081,
      "learning_rate": 7.736064434930193e-05,
      "loss": 0.1262,
      "step": 20
    },
    {
      "epoch": 0.65625,
      "grad_norm": 0.012104247042808634,
      "learning_rate": 7.681384734276638e-05,
      "loss": 0.1387,
      "step": 21
    },
    {
      "epoch": 0.6875,
      "grad_norm": 0.011914085432472963,
      "learning_rate": 7.621792949972588e-05,
      "loss": 0.1343,
      "step": 22
    },
    {
      "epoch": 0.71875,
      "grad_norm": 0.010990223881779806,
      "learning_rate": 7.557368595530076e-05,
      "loss": 0.1238,
      "step": 23
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.01320819515665581,
      "learning_rate": 7.488197632575232e-05,
      "loss": 0.1214,
      "step": 24
    },
    {
      "epoch": 0.78125,
      "grad_norm": 0.01322245765237176,
      "learning_rate": 7.414372356149387e-05,
      "loss": 0.1153,
      "step": 25
    },
    {
      "epoch": 0.8125,
      "grad_norm": 0.010385923912313553,
      "learning_rate": 7.335991271559512e-05,
      "loss": 0.1223,
      "step": 26
    },
    {
      "epoch": 0.84375,
      "grad_norm": 0.012238549923832583,
      "learning_rate": 7.253158962942263e-05,
      "loss": 0.0986,
      "step": 27
    },
    {
      "epoch": 0.875,
      "grad_norm": 0.010400016197007436,
      "learning_rate": 7.165985953717017e-05,
      "loss": 0.1286,
      "step": 28
    },
    {
      "epoch": 0.90625,
      "grad_norm": 0.010671721518367596,
      "learning_rate": 7.074588559114129e-05,
      "loss": 0.0961,
      "step": 29
    },
    {
      "epoch": 0.9375,
      "grad_norm": 0.010414959893719981,
      "learning_rate": 6.979088730975128e-05,
      "loss": 0.1112,
      "step": 30
    },
    {
      "epoch": 0.96875,
      "grad_norm": 0.01357086820570754,
      "learning_rate": 6.879613895031985e-05,
      "loss": 0.1148,
      "step": 31
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.008686431578805668,
      "learning_rate": 6.776296780882537e-05,
      "loss": 0.0883,
      "step": 32
    },
    {
      "epoch": 1.03125,
      "grad_norm": 0.008746187137147936,
      "learning_rate": 6.669275244888958e-05,
      "loss": 0.0889,
      "step": 33
    },
    {
      "epoch": 1.0625,
      "grad_norm": 0.008757438982799376,
      "learning_rate": 6.558692086235565e-05,
      "loss": 0.104,
      "step": 34
    },
    {
      "epoch": 1.09375,
      "grad_norm": 0.009006564148642229,
      "learning_rate": 6.444694856391398e-05,
      "loss": 0.122,
      "step": 35
    },
    {
      "epoch": 1.125,
      "grad_norm": 0.010008172066511766,
      "learning_rate": 6.327435662231812e-05,
      "loss": 0.0901,
      "step": 36
    },
    {
      "epoch": 1.15625,
      "grad_norm": 0.009132505165485642,
      "learning_rate": 6.207070963081785e-05,
      "loss": 0.086,
      "step": 37
    },
    {
      "epoch": 1.1875,
      "grad_norm": 0.009598935444053497,
      "learning_rate": 6.083761361951722e-05,
      "loss": 0.0936,
      "step": 38
    },
    {
      "epoch": 1.21875,
      "grad_norm": 0.008378651687178142,
      "learning_rate": 5.9576713912443424e-05,
      "loss": 0.0885,
      "step": 39
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.00933039866826418,
      "learning_rate": 5.8289692932185546e-05,
      "loss": 0.0779,
      "step": 40
    },
    {
      "epoch": 1.28125,
      "grad_norm": 0.0075829952615778205,
      "learning_rate": 5.697826795503261e-05,
      "loss": 0.0874,
      "step": 41
    },
    {
      "epoch": 1.3125,
      "grad_norm": 0.010708180538382118,
      "learning_rate": 5.564418881960624e-05,
      "loss": 0.0859,
      "step": 42
    },
    {
      "epoch": 1.34375,
      "grad_norm": 0.007747505771324856,
      "learning_rate": 5.428923559204531e-05,
      "loss": 0.1006,
      "step": 43
    },
    {
      "epoch": 1.375,
      "grad_norm": 0.009750309828485825,
      "learning_rate": 5.291521619085785e-05,
      "loss": 0.0782,
      "step": 44
    },
    {
      "epoch": 1.40625,
      "grad_norm": 0.009861574342948768,
      "learning_rate": 5.1523963974609515e-05,
      "loss": 0.091,
      "step": 45
    },
    {
      "epoch": 1.4375,
      "grad_norm": 0.009603678940639833,
      "learning_rate": 5.011733529566723e-05,
      "loss": 0.0807,
      "step": 46
    },
    {
      "epoch": 1.46875,
      "grad_norm": 0.00888308808997568,
      "learning_rate": 4.869720702326229e-05,
      "loss": 0.0857,
      "step": 47
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.009414702119914659,
      "learning_rate": 4.726547403917746e-05,
      "loss": 0.0979,
      "step": 48
    },
    {
      "epoch": 1.53125,
      "grad_norm": 0.008610758398705433,
      "learning_rate": 4.582404670940021e-05,
      "loss": 0.1021,
      "step": 49
    },
    {
      "epoch": 1.5625,
      "grad_norm": 0.011526420954707487,
      "learning_rate": 4.437484833511499e-05,
      "loss": 0.0745,
      "step": 50
    },
    {
      "epoch": 1.59375,
      "grad_norm": 0.010556992532812162,
      "learning_rate": 4.29198125864363e-05,
      "loss": 0.0993,
      "step": 51
    },
    {
      "epoch": 1.625,
      "grad_norm": 0.009364390772519583,
      "learning_rate": 4.1460880922306367e-05,
      "loss": 0.0837,
      "step": 52
    },
    {
      "epoch": 1.65625,
      "grad_norm": 0.010733309233216014,
      "learning_rate": 4e-05,
      "loss": 0.0727,
      "step": 53
    },
    {
      "epoch": 1.6875,
      "grad_norm": 0.009948186524320463,
      "learning_rate": 3.853911907769365e-05,
      "loss": 0.0753,
      "step": 54
    },
    {
      "epoch": 1.71875,
      "grad_norm": 0.008731121930922115,
      "learning_rate": 3.7080187413563696e-05,
      "loss": 0.0899,
      "step": 55
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.008819596606521584,
      "learning_rate": 3.5625151664885036e-05,
      "loss": 0.1033,
      "step": 56
    },
    {
      "epoch": 1.78125,
      "grad_norm": 0.008939943194000786,
      "learning_rate": 3.417595329059982e-05,
      "loss": 0.0884,
      "step": 57
    },
    {
      "epoch": 1.8125,
      "grad_norm": 0.010372299887084407,
      "learning_rate": 3.2734525960822545e-05,
      "loss": 0.0715,
      "step": 58
    },
    {
      "epoch": 1.84375,
      "grad_norm": 0.008056358111729533,
      "learning_rate": 3.1302792976737726e-05,
      "loss": 0.071,
      "step": 59
    },
    {
      "epoch": 1.875,
      "grad_norm": 0.009388297857955292,
      "learning_rate": 2.988266470433277e-05,
      "loss": 0.0668,
      "step": 60
    },
    {
      "epoch": 1.90625,
      "grad_norm": 0.007081043834040608,
      "learning_rate": 2.84760360253905e-05,
      "loss": 0.0673,
      "step": 61
    },
    {
      "epoch": 1.9375,
      "grad_norm": 0.009735247855895699,
      "learning_rate": 2.7084783809142164e-05,
      "loss": 0.073,
      "step": 62
    },
    {
      "epoch": 1.96875,
      "grad_norm": 0.00902307523218325,
      "learning_rate": 2.5710764407954692e-05,
      "loss": 0.0715,
      "step": 63
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.009251890848275948,
      "learning_rate": 2.4355811180393767e-05,
      "loss": 0.0665,
      "step": 64
    },
    {
      "epoch": 2.03125,
      "grad_norm": 0.009574242139790865,
      "learning_rate": 2.3021732044967405e-05,
      "loss": 0.0833,
      "step": 65
    },
    {
      "epoch": 2.0625,
      "grad_norm": 0.00845797267194501,
      "learning_rate": 2.171030706781446e-05,
      "loss": 0.0767,
      "step": 66
    },
    {
      "epoch": 2.09375,
      "grad_norm": 0.00851640820916414,
      "learning_rate": 2.042328608755659e-05,
      "loss": 0.0719,
      "step": 67
    },
    {
      "epoch": 2.125,
      "grad_norm": 0.006554791080981734,
      "learning_rate": 1.9162386380482795e-05,
      "loss": 0.0791,
      "step": 68
    },
    {
      "epoch": 2.15625,
      "grad_norm": 0.0075086478549201046,
      "learning_rate": 1.7929290369182163e-05,
      "loss": 0.0757,
      "step": 69
    },
    {
      "epoch": 2.1875,
      "grad_norm": 0.006982976291437535,
      "learning_rate": 1.6725643377681893e-05,
      "loss": 0.0737,
      "step": 70
    },
    {
      "epoch": 2.21875,
      "grad_norm": 0.0072966512436396175,
      "learning_rate": 1.555305143608603e-05,
      "loss": 0.0614,
      "step": 71
    },
    {
      "epoch": 2.25,
      "grad_norm": 0.010255244256125031,
      "learning_rate": 1.4413079137644358e-05,
      "loss": 0.0785,
      "step": 72
    },
    {
      "epoch": 2.28125,
      "grad_norm": 0.007224969439463847,
      "learning_rate": 1.3307247551110427e-05,
      "loss": 0.0671,
      "step": 73
    },
    {
      "epoch": 2.3125,
      "grad_norm": 0.006306350065097052,
      "learning_rate": 1.2237032191174642e-05,
      "loss": 0.0758,
      "step": 74
    },
    {
      "epoch": 2.34375,
      "grad_norm": 0.006644867462013387,
      "learning_rate": 1.1203861049680174e-05,
      "loss": 0.0729,
      "step": 75
    },
    {
      "epoch": 2.375,
      "grad_norm": 0.005748593491336838,
      "learning_rate": 1.0209112690248726e-05,
      "loss": 0.0778,
      "step": 76
    },
    {
      "epoch": 2.40625,
      "grad_norm": 0.008966184904909018,
      "learning_rate": 9.254114408858714e-06,
      "loss": 0.0968,
      "step": 77
    },
    {
      "epoch": 2.4375,
      "grad_norm": 0.007694751986125674,
      "learning_rate": 8.34014046282984e-06,
      "loss": 0.0802,
      "step": 78
    },
    {
      "epoch": 2.46875,
      "grad_norm": 0.0063823599760032676,
      "learning_rate": 7.468410370577386e-06,
      "loss": 0.0673,
      "step": 79
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.008450322134241711,
      "learning_rate": 6.640087284404888e-06,
      "loss": 0.0666,
      "step": 80
    },
    {
      "epoch": 2.53125,
      "grad_norm": 0.006709629969054719,
      "learning_rate": 5.856276438506143e-06,
      "loss": 0.0997,
      "step": 81
    },
    {
      "epoch": 2.5625,
      "grad_norm": 0.007158750558627561,
      "learning_rate": 5.118023674247692e-06,
      "loss": 0.0637,
      "step": 82
    },
    {
      "epoch": 2.59375,
      "grad_norm": 0.007755779440490774,
      "learning_rate": 4.426314044699247e-06,
      "loss": 0.0737,
      "step": 83
    },
    {
      "epoch": 2.625,
      "grad_norm": 0.007329360026099688,
      "learning_rate": 3.7820705002741353e-06,
      "loss": 0.0737,
      "step": 84
    },
    {
      "epoch": 2.65625,
      "grad_norm": 0.006341855163676503,
      "learning_rate": 3.1861526572336276e-06,
      "loss": 0.0714,
      "step": 85
    },
    {
      "epoch": 2.6875,
      "grad_norm": 0.00790287372563921,
      "learning_rate": 2.6393556506980834e-06,
      "loss": 0.0713,
      "step": 86
    },
    {
      "epoch": 2.71875,
      "grad_norm": 0.008053046048268534,
      "learning_rate": 2.142409073695624e-06,
      "loss": 0.0743,
      "step": 87
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.006069808898239832,
      "learning_rate": 1.6959760036637662e-06,
      "loss": 0.0628,
      "step": 88
    },
    {
      "epoch": 2.78125,
      "grad_norm": 0.007254457262914832,
      "learning_rate": 1.3006521177029918e-06,
      "loss": 0.0657,
      "step": 89
    },
    {
      "epoch": 2.8125,
      "grad_norm": 0.012422401519455362,
      "learning_rate": 9.569648977629176e-07,
      "loss": 0.069,
      "step": 90
    },
    {
      "epoch": 2.84375,
      "grad_norm": 0.006051084453406604,
      "learning_rate": 6.653729268213571e-07,
      "loss": 0.0726,
      "step": 91
    },
    {
      "epoch": 2.875,
      "grad_norm": 0.009547135845099962,
      "learning_rate": 4.2626527699558996e-07,
      "loss": 0.0667,
      "step": 92
    },
    {
      "epoch": 2.90625,
      "grad_norm": 0.006957923092475667,
      "learning_rate": 2.399609904021638e-07,
      "loss": 0.0702,
      "step": 93
    },
    {
      "epoch": 2.9375,
      "grad_norm": 0.006164626385638639,
      "learning_rate": 1.0670865345793425e-07,
      "loss": 0.0675,
      "step": 94
    },
    {
      "epoch": 2.96875,
      "grad_norm": 0.007690808396614983,
      "learning_rate": 2.668606519042438e-08,
      "loss": 0.0781,
      "step": 95
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.006797357211691852,
      "learning_rate": 0.0,
      "loss": 0.0718,
      "step": 96
    },
    {
      "epoch": 3.0,
      "step": 96,
      "total_flos": 95673135267840.0,
      "train_loss": 0.10282962660615642,
      "train_runtime": 1573.3331,
      "train_samples_per_second": 0.477,
      "train_steps_per_second": 0.061
    }
  ],
  "logging_steps": 1,
  "max_steps": 96,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 95673135267840.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}