{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 96,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03125,
      "grad_norm": 0.014413790006272076,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.1832,
      "step": 1
    },
    {
      "epoch": 0.0625,
      "grad_norm": 0.015286289251017835,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.1971,
      "step": 2
    },
    {
      "epoch": 0.09375,
      "grad_norm": 0.013808518858401233,
      "learning_rate": 2.4e-05,
      "loss": 0.1893,
      "step": 3
    },
    {
      "epoch": 0.125,
      "grad_norm": 0.013703889612803492,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.1785,
      "step": 4
    },
    {
      "epoch": 0.15625,
      "grad_norm": 0.014802227050922873,
      "learning_rate": 4e-05,
      "loss": 0.1902,
      "step": 5
    },
    {
      "epoch": 0.1875,
      "grad_norm": 0.016201544995199633,
      "learning_rate": 4.8e-05,
      "loss": 0.1835,
      "step": 6
    },
    {
      "epoch": 0.21875,
      "grad_norm": 0.021772630295473456,
      "learning_rate": 5.6e-05,
      "loss": 0.1809,
      "step": 7
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.020211017368659157,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.1761,
      "step": 8
    },
    {
      "epoch": 0.28125,
      "grad_norm": 0.0260734966065386,
      "learning_rate": 7.2e-05,
      "loss": 0.1718,
      "step": 9
    },
    {
      "epoch": 0.3125,
      "grad_norm": 0.017366310053783893,
      "learning_rate": 8e-05,
      "loss": 0.1781,
      "step": 10
    },
    {
      "epoch": 0.34375,
      "grad_norm": 0.025781693597848775,
      "learning_rate": 7.997331393480957e-05,
      "loss": 0.1494,
      "step": 11
    },
    {
      "epoch": 0.375,
      "grad_norm": 0.02415549236441502,
      "learning_rate": 7.989329134654207e-05,
      "loss": 0.1568,
      "step": 12
    },
    {
      "epoch": 0.40625,
      "grad_norm": 0.026639654546969385,
      "learning_rate": 7.976003900959785e-05,
      "loss": 0.1535,
      "step": 13
    },
    {
      "epoch": 0.4375,
      "grad_norm": 0.023239148608094912,
      "learning_rate": 7.957373472300442e-05,
      "loss": 0.1452,
      "step": 14
    },
    {
      "epoch": 0.46875,
      "grad_norm": 0.019646912515308475,
      "learning_rate": 7.933462707317864e-05,
      "loss": 0.146,
      "step": 15
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.016328052850393588,
      "learning_rate": 7.90430351022371e-05,
      "loss": 0.1346,
      "step": 16
    },
    {
      "epoch": 0.53125,
      "grad_norm": 0.016311478173498328,
      "learning_rate": 7.869934788229701e-05,
      "loss": 0.1306,
      "step": 17
    },
    {
      "epoch": 0.5625,
      "grad_norm": 0.020932933884698322,
      "learning_rate": 7.830402399633624e-05,
      "loss": 0.1347,
      "step": 18
    },
    {
      "epoch": 0.59375,
      "grad_norm": 0.01568803210591663,
      "learning_rate": 7.785759092630437e-05,
      "loss": 0.1252,
      "step": 19
    },
    {
      "epoch": 0.625,
      "grad_norm": 0.013614526922555072,
      "learning_rate": 7.736064434930193e-05,
      "loss": 0.1211,
      "step": 20
    },
    {
      "epoch": 0.65625,
      "grad_norm": 0.012132527228294816,
      "learning_rate": 7.681384734276638e-05,
      "loss": 0.1294,
      "step": 21
    },
    {
      "epoch": 0.6875,
      "grad_norm": 0.013945329214064448,
      "learning_rate": 7.621792949972588e-05,
      "loss": 0.121,
      "step": 22
    },
    {
      "epoch": 0.71875,
      "grad_norm": 0.012905759587315598,
      "learning_rate": 7.557368595530076e-05,
      "loss": 0.1177,
      "step": 23
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.011280852838475014,
      "learning_rate": 7.488197632575232e-05,
      "loss": 0.1026,
      "step": 24
    },
    {
      "epoch": 0.78125,
      "grad_norm": 0.01202019897378626,
      "learning_rate": 7.414372356149387e-05,
      "loss": 0.101,
      "step": 25
    },
    {
      "epoch": 0.8125,
      "grad_norm": 0.011627238221270868,
      "learning_rate": 7.335991271559512e-05,
      "loss": 0.0965,
      "step": 26
    },
    {
      "epoch": 0.84375,
      "grad_norm": 0.012789799522618486,
      "learning_rate": 7.253158962942263e-05,
      "loss": 0.0944,
      "step": 27
    },
    {
      "epoch": 0.875,
      "grad_norm": 0.013689289713544437,
      "learning_rate": 7.165985953717017e-05,
      "loss": 0.0942,
      "step": 28
    },
    {
      "epoch": 0.90625,
      "grad_norm": 0.01389761693862599,
      "learning_rate": 7.074588559114129e-05,
      "loss": 0.0881,
      "step": 29
    },
    {
      "epoch": 0.9375,
      "grad_norm": 0.013417207563330657,
      "learning_rate": 6.979088730975128e-05,
      "loss": 0.0873,
      "step": 30
    },
    {
      "epoch": 0.96875,
      "grad_norm": 0.01336505741138548,
      "learning_rate": 6.879613895031985e-05,
      "loss": 0.0921,
      "step": 31
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.01091689021316452,
      "learning_rate": 6.776296780882537e-05,
      "loss": 0.0801,
      "step": 32
    },
    {
      "epoch": 1.03125,
      "grad_norm": 0.011023462796598138,
      "learning_rate": 6.669275244888958e-05,
      "loss": 0.0732,
      "step": 33
    },
    {
      "epoch": 1.0625,
      "grad_norm": 0.012014361305470102,
      "learning_rate": 6.558692086235565e-05,
      "loss": 0.0727,
      "step": 34
    },
    {
      "epoch": 1.09375,
      "grad_norm": 0.013884789458039141,
      "learning_rate": 6.444694856391398e-05,
      "loss": 0.077,
      "step": 35
    },
    {
      "epoch": 1.125,
      "grad_norm": 0.010903558896861586,
      "learning_rate": 6.327435662231812e-05,
      "loss": 0.0731,
      "step": 36
    },
    {
      "epoch": 1.15625,
      "grad_norm": 0.011549472776983169,
      "learning_rate": 6.207070963081785e-05,
      "loss": 0.0716,
      "step": 37
    },
    {
      "epoch": 1.1875,
      "grad_norm": 0.009548160145978311,
      "learning_rate": 6.083761361951722e-05,
      "loss": 0.0742,
      "step": 38
    },
    {
      "epoch": 1.21875,
      "grad_norm": 0.010232917592466603,
      "learning_rate": 5.9576713912443424e-05,
      "loss": 0.0695,
      "step": 39
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.010299916696845978,
      "learning_rate": 5.8289692932185546e-05,
      "loss": 0.0619,
      "step": 40
    },
    {
      "epoch": 1.28125,
      "grad_norm": 0.011094091138360133,
      "learning_rate": 5.697826795503261e-05,
      "loss": 0.0776,
      "step": 41
    },
    {
      "epoch": 1.3125,
      "grad_norm": 0.009385162611199102,
      "learning_rate": 5.564418881960624e-05,
      "loss": 0.0649,
      "step": 42
    },
    {
      "epoch": 1.34375,
      "grad_norm": 0.011007640882632121,
      "learning_rate": 5.428923559204531e-05,
      "loss": 0.0627,
      "step": 43
    },
    {
      "epoch": 1.375,
      "grad_norm": 0.010156882406149289,
      "learning_rate": 5.291521619085785e-05,
      "loss": 0.0603,
      "step": 44
    },
    {
      "epoch": 1.40625,
      "grad_norm": 0.00966078242953305,
      "learning_rate": 5.1523963974609515e-05,
      "loss": 0.0679,
      "step": 45
    },
    {
      "epoch": 1.4375,
      "grad_norm": 0.011357931233504452,
      "learning_rate": 5.011733529566723e-05,
      "loss": 0.0609,
      "step": 46
    },
    {
      "epoch": 1.46875,
      "grad_norm": 0.010247334892864645,
      "learning_rate": 4.869720702326229e-05,
      "loss": 0.0605,
      "step": 47
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.009352017507113715,
      "learning_rate": 4.726547403917746e-05,
      "loss": 0.0488,
      "step": 48
    },
    {
      "epoch": 1.53125,
      "grad_norm": 0.008186790068143334,
      "learning_rate": 4.582404670940021e-05,
      "loss": 0.0596,
      "step": 49
    },
    {
      "epoch": 1.5625,
      "grad_norm": 0.007624145637993475,
      "learning_rate": 4.437484833511499e-05,
      "loss": 0.049,
      "step": 50
    },
    {
      "epoch": 1.59375,
      "grad_norm": 0.010047711078895386,
      "learning_rate": 4.29198125864363e-05,
      "loss": 0.0774,
      "step": 51
    },
    {
      "epoch": 1.625,
      "grad_norm": 0.013252922529623892,
      "learning_rate": 4.1460880922306367e-05,
      "loss": 0.0622,
      "step": 52
    },
    {
      "epoch": 1.65625,
      "grad_norm": 0.011309751062269268,
      "learning_rate": 4e-05,
      "loss": 0.0612,
      "step": 53
    },
    {
      "epoch": 1.6875,
      "grad_norm": 0.009236331962381933,
      "learning_rate": 3.853911907769365e-05,
      "loss": 0.0487,
      "step": 54
    },
    {
      "epoch": 1.71875,
      "grad_norm": 0.008763988624638465,
      "learning_rate": 3.7080187413563696e-05,
      "loss": 0.0562,
      "step": 55
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.006909623925582444,
      "learning_rate": 3.5625151664885036e-05,
      "loss": 0.0549,
      "step": 56
    },
    {
      "epoch": 1.78125,
      "grad_norm": 0.00928674005234459,
      "learning_rate": 3.417595329059982e-05,
      "loss": 0.0611,
      "step": 57
    },
    {
      "epoch": 1.8125,
      "grad_norm": 0.007663262106932942,
      "learning_rate": 3.2734525960822545e-05,
      "loss": 0.0536,
      "step": 58
    },
    {
      "epoch": 1.84375,
      "grad_norm": 0.010176305756975843,
      "learning_rate": 3.1302792976737726e-05,
      "loss": 0.0677,
      "step": 59
    },
    {
      "epoch": 1.875,
      "grad_norm": 0.006868999450668199,
      "learning_rate": 2.988266470433277e-05,
      "loss": 0.0434,
      "step": 60
    },
    {
      "epoch": 1.90625,
      "grad_norm": 0.009318131227932411,
      "learning_rate": 2.84760360253905e-05,
      "loss": 0.0596,
      "step": 61
    },
    {
      "epoch": 1.9375,
      "grad_norm": 0.010124321764307779,
      "learning_rate": 2.7084783809142164e-05,
      "loss": 0.0528,
      "step": 62
    },
    {
      "epoch": 1.96875,
      "grad_norm": 0.00951481684638266,
      "learning_rate": 2.5710764407954692e-05,
      "loss": 0.072,
      "step": 63
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.008093843355339957,
      "learning_rate": 2.4355811180393767e-05,
      "loss": 0.0453,
      "step": 64
    },
    {
      "epoch": 2.03125,
      "grad_norm": 0.006112084669096018,
      "learning_rate": 2.3021732044967405e-05,
      "loss": 0.0635,
      "step": 65
    },
    {
      "epoch": 2.0625,
      "grad_norm": 0.007296362075154464,
      "learning_rate": 2.171030706781446e-05,
      "loss": 0.0696,
      "step": 66
    },
    {
      "epoch": 2.09375,
      "grad_norm": 0.008303190097669362,
      "learning_rate": 2.042328608755659e-05,
      "loss": 0.0546,
      "step": 67
    },
    {
      "epoch": 2.125,
      "grad_norm": 0.007598832672231313,
      "learning_rate": 1.9162386380482795e-05,
      "loss": 0.061,
      "step": 68
    },
    {
      "epoch": 2.15625,
      "grad_norm": 0.007968601948142437,
      "learning_rate": 1.7929290369182163e-05,
      "loss": 0.0674,
      "step": 69
    },
    {
      "epoch": 2.1875,
      "grad_norm": 0.008045459418329922,
      "learning_rate": 1.6725643377681893e-05,
      "loss": 0.0492,
      "step": 70
    },
    {
      "epoch": 2.21875,
      "grad_norm": 0.006696151841334819,
      "learning_rate": 1.555305143608603e-05,
      "loss": 0.0577,
      "step": 71
    },
    {
      "epoch": 2.25,
      "grad_norm": 0.006461733615179813,
      "learning_rate": 1.4413079137644358e-05,
      "loss": 0.0512,
      "step": 72
    },
    {
      "epoch": 2.28125,
      "grad_norm": 0.009050679653670176,
      "learning_rate": 1.3307247551110427e-05,
      "loss": 0.0527,
      "step": 73
    },
    {
      "epoch": 2.3125,
      "grad_norm": 0.006589522882782293,
      "learning_rate": 1.2237032191174642e-05,
      "loss": 0.0618,
      "step": 74
    },
    {
      "epoch": 2.34375,
      "grad_norm": 0.008042442255031333,
      "learning_rate": 1.1203861049680174e-05,
      "loss": 0.0477,
      "step": 75
    },
    {
      "epoch": 2.375,
      "grad_norm": 0.010173756967121942,
      "learning_rate": 1.0209112690248726e-05,
      "loss": 0.0495,
      "step": 76
    },
    {
      "epoch": 2.40625,
      "grad_norm": 0.006896239372707892,
      "learning_rate": 9.254114408858714e-06,
      "loss": 0.052,
      "step": 77
    },
    {
      "epoch": 2.4375,
      "grad_norm": 0.007939279196130926,
      "learning_rate": 8.34014046282984e-06,
      "loss": 0.0556,
      "step": 78
    },
    {
      "epoch": 2.46875,
      "grad_norm": 0.0070869043431510985,
      "learning_rate": 7.468410370577386e-06,
      "loss": 0.0515,
      "step": 79
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.008344790590566956,
      "learning_rate": 6.640087284404888e-06,
      "loss": 0.045,
      "step": 80
    },
    {
      "epoch": 2.53125,
      "grad_norm": 0.007983244643460737,
      "learning_rate": 5.856276438506143e-06,
      "loss": 0.0508,
      "step": 81
    },
    {
      "epoch": 2.5625,
      "grad_norm": 0.00824835850338399,
      "learning_rate": 5.118023674247692e-06,
      "loss": 0.0491,
      "step": 82
    },
    {
      "epoch": 2.59375,
      "grad_norm": 0.0073374739634495196,
      "learning_rate": 4.426314044699247e-06,
      "loss": 0.0534,
      "step": 83
    },
    {
      "epoch": 2.625,
      "grad_norm": 0.00837098738346038,
      "learning_rate": 3.7820705002741353e-06,
      "loss": 0.0583,
      "step": 84
    },
    {
      "epoch": 2.65625,
      "grad_norm": 0.008884504430927096,
      "learning_rate": 3.1861526572336276e-06,
      "loss": 0.0444,
      "step": 85
    },
    {
      "epoch": 2.6875,
      "grad_norm": 0.006183009130438192,
      "learning_rate": 2.6393556506980834e-06,
      "loss": 0.0523,
      "step": 86
    },
    {
      "epoch": 2.71875,
      "grad_norm": 0.006621560461357937,
      "learning_rate": 2.142409073695624e-06,
      "loss": 0.0581,
      "step": 87
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.008310644624835817,
      "learning_rate": 1.6959760036637662e-06,
      "loss": 0.0553,
      "step": 88
    },
    {
      "epoch": 2.78125,
      "grad_norm": 0.006385939292701655,
      "learning_rate": 1.3006521177029918e-06,
      "loss": 0.0393,
      "step": 89
    },
    {
      "epoch": 2.8125,
      "grad_norm": 0.008205777819110395,
      "learning_rate": 9.569648977629176e-07,
      "loss": 0.0468,
      "step": 90
    },
    {
      "epoch": 2.84375,
      "grad_norm": 0.009393866216782433,
      "learning_rate": 6.653729268213571e-07,
      "loss": 0.0478,
      "step": 91
    },
    {
      "epoch": 2.875,
      "grad_norm": 0.006988704076284933,
      "learning_rate": 4.2626527699558996e-07,
      "loss": 0.0464,
      "step": 92
    },
    {
      "epoch": 2.90625,
      "grad_norm": 0.007264320489685286,
      "learning_rate": 2.399609904021638e-07,
      "loss": 0.0502,
      "step": 93
    },
    {
      "epoch": 2.9375,
      "grad_norm": 0.006273648479325859,
      "learning_rate": 1.0670865345793425e-07,
      "loss": 0.0442,
      "step": 94
    },
    {
      "epoch": 2.96875,
      "grad_norm": 0.00748221323914626,
      "learning_rate": 2.668606519042438e-08,
      "loss": 0.0525,
      "step": 95
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.0058201687214057786,
      "learning_rate": 0.0,
      "loss": 0.0615,
      "step": 96
    },
    {
      "epoch": 3.0,
      "step": 96,
      "total_flos": 84946498682880.0,
      "train_loss": 0.08471203568236281,
      "train_runtime": 1511.4146,
      "train_samples_per_second": 0.496,
      "train_steps_per_second": 0.064
    }
  ],
  "logging_steps": 1,
  "max_steps": 96,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 84946498682880.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}