{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 96,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03125,
      "grad_norm": 0.015555548226235847,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.1795,
      "step": 1
    },
    {
      "epoch": 0.0625,
      "grad_norm": 0.013800665219008365,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.1768,
      "step": 2
    },
    {
      "epoch": 0.09375,
      "grad_norm": 0.014132329892254962,
      "learning_rate": 2.4e-05,
      "loss": 0.1921,
      "step": 3
    },
    {
      "epoch": 0.125,
      "grad_norm": 0.013647812570140987,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.1805,
      "step": 4
    },
    {
      "epoch": 0.15625,
      "grad_norm": 0.014203214484248227,
      "learning_rate": 4e-05,
      "loss": 0.1875,
      "step": 5
    },
    {
      "epoch": 0.1875,
      "grad_norm": 0.015990651573010717,
      "learning_rate": 4.8e-05,
      "loss": 0.1877,
      "step": 6
    },
    {
      "epoch": 0.21875,
      "grad_norm": 0.01827057279432701,
      "learning_rate": 5.6e-05,
      "loss": 0.1899,
      "step": 7
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.021372181025502847,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.1806,
      "step": 8
    },
    {
      "epoch": 0.28125,
      "grad_norm": 0.022264619249609215,
      "learning_rate": 7.2e-05,
      "loss": 0.1725,
      "step": 9
    },
    {
      "epoch": 0.3125,
      "grad_norm": 0.022481761148111745,
      "learning_rate": 8e-05,
      "loss": 0.1769,
      "step": 10
    },
    {
      "epoch": 0.34375,
      "grad_norm": 0.02199281089335684,
      "learning_rate": 7.997331393480957e-05,
      "loss": 0.1623,
      "step": 11
    },
    {
      "epoch": 0.375,
      "grad_norm": 0.02415170895576771,
      "learning_rate": 7.989329134654207e-05,
      "loss": 0.1483,
      "step": 12
    },
    {
      "epoch": 0.40625,
      "grad_norm": 0.025368950292646637,
      "learning_rate": 7.976003900959785e-05,
      "loss": 0.1466,
      "step": 13
    },
    {
      "epoch": 0.4375,
      "grad_norm": 0.023042728496009402,
      "learning_rate": 7.957373472300442e-05,
      "loss": 0.144,
      "step": 14
    },
    {
      "epoch": 0.46875,
      "grad_norm": 0.01841107430123264,
      "learning_rate": 7.933462707317864e-05,
      "loss": 0.1484,
      "step": 15
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.01932089572434789,
      "learning_rate": 7.90430351022371e-05,
      "loss": 0.15,
      "step": 16
    },
    {
      "epoch": 0.53125,
      "grad_norm": 0.021053052797474766,
      "learning_rate": 7.869934788229701e-05,
      "loss": 0.1421,
      "step": 17
    },
    {
      "epoch": 0.5625,
      "grad_norm": 0.014016560043928594,
      "learning_rate": 7.830402399633624e-05,
      "loss": 0.1262,
      "step": 18
    },
    {
      "epoch": 0.59375,
      "grad_norm": 0.017650483638440864,
      "learning_rate": 7.785759092630437e-05,
      "loss": 0.1237,
      "step": 19
    },
    {
      "epoch": 0.625,
      "grad_norm": 0.010743267827126108,
      "learning_rate": 7.736064434930193e-05,
      "loss": 0.1116,
      "step": 20
    },
    {
      "epoch": 0.65625,
      "grad_norm": 0.013230764904715359,
      "learning_rate": 7.681384734276638e-05,
      "loss": 0.1206,
      "step": 21
    },
    {
      "epoch": 0.6875,
      "grad_norm": 0.013016489761578938,
      "learning_rate": 7.621792949972588e-05,
      "loss": 0.1186,
      "step": 22
    },
    {
      "epoch": 0.71875,
      "grad_norm": 0.011247812663453547,
      "learning_rate": 7.557368595530076e-05,
      "loss": 0.121,
      "step": 23
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.013149205231347926,
      "learning_rate": 7.488197632575232e-05,
      "loss": 0.106,
      "step": 24
    },
    {
      "epoch": 0.78125,
      "grad_norm": 0.012586933775444346,
      "learning_rate": 7.414372356149387e-05,
      "loss": 0.0965,
      "step": 25
    },
    {
      "epoch": 0.8125,
      "grad_norm": 0.010763937752964619,
      "learning_rate": 7.335991271559512e-05,
      "loss": 0.0906,
      "step": 26
    },
    {
      "epoch": 0.84375,
      "grad_norm": 0.011304615647994036,
      "learning_rate": 7.253158962942263e-05,
      "loss": 0.0993,
      "step": 27
    },
    {
      "epoch": 0.875,
      "grad_norm": 0.013556772709000629,
      "learning_rate": 7.165985953717017e-05,
      "loss": 0.1135,
      "step": 28
    },
    {
      "epoch": 0.90625,
      "grad_norm": 0.012889645148429477,
      "learning_rate": 7.074588559114129e-05,
      "loss": 0.0885,
      "step": 29
    },
    {
      "epoch": 0.9375,
      "grad_norm": 0.011941110268551398,
      "learning_rate": 6.979088730975128e-05,
      "loss": 0.0874,
      "step": 30
    },
    {
      "epoch": 0.96875,
      "grad_norm": 0.011444163436616204,
      "learning_rate": 6.879613895031985e-05,
      "loss": 0.0943,
      "step": 31
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.009679878401660093,
      "learning_rate": 6.776296780882537e-05,
      "loss": 0.0833,
      "step": 32
    },
    {
      "epoch": 1.03125,
      "grad_norm": 0.011482692534722843,
      "learning_rate": 6.669275244888958e-05,
      "loss": 0.0801,
      "step": 33
    },
    {
      "epoch": 1.0625,
      "grad_norm": 0.009816652981859712,
      "learning_rate": 6.558692086235565e-05,
      "loss": 0.0697,
      "step": 34
    },
    {
      "epoch": 1.09375,
      "grad_norm": 0.013694898335999383,
      "learning_rate": 6.444694856391398e-05,
      "loss": 0.0873,
      "step": 35
    },
    {
      "epoch": 1.125,
      "grad_norm": 0.010058784335958661,
      "learning_rate": 6.327435662231812e-05,
      "loss": 0.0659,
      "step": 36
    },
    {
      "epoch": 1.15625,
      "grad_norm": 0.01165954318380878,
      "learning_rate": 6.207070963081785e-05,
      "loss": 0.0679,
      "step": 37
    },
    {
      "epoch": 1.1875,
      "grad_norm": 0.008804730081572404,
      "learning_rate": 6.083761361951722e-05,
      "loss": 0.0835,
      "step": 38
    },
    {
      "epoch": 1.21875,
      "grad_norm": 0.009606165758522137,
      "learning_rate": 5.9576713912443424e-05,
      "loss": 0.0669,
      "step": 39
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.009366815637622117,
      "learning_rate": 5.8289692932185546e-05,
      "loss": 0.0695,
      "step": 40
    },
    {
      "epoch": 1.28125,
      "grad_norm": 0.010023932857114864,
      "learning_rate": 5.697826795503261e-05,
      "loss": 0.0742,
      "step": 41
    },
    {
      "epoch": 1.3125,
      "grad_norm": 0.009347937286676341,
      "learning_rate": 5.564418881960624e-05,
      "loss": 0.068,
      "step": 42
    },
    {
      "epoch": 1.34375,
      "grad_norm": 0.008738214036502606,
      "learning_rate": 5.428923559204531e-05,
      "loss": 0.0748,
      "step": 43
    },
    {
      "epoch": 1.375,
      "grad_norm": 0.008858442662630537,
      "learning_rate": 5.291521619085785e-05,
      "loss": 0.063,
      "step": 44
    },
    {
      "epoch": 1.40625,
      "grad_norm": 0.009792508058639961,
      "learning_rate": 5.1523963974609515e-05,
      "loss": 0.0597,
      "step": 45
    },
    {
      "epoch": 1.4375,
      "grad_norm": 0.01073916250749086,
      "learning_rate": 5.011733529566723e-05,
      "loss": 0.051,
      "step": 46
    },
    {
      "epoch": 1.46875,
      "grad_norm": 0.009449361060178994,
      "learning_rate": 4.869720702326229e-05,
      "loss": 0.0673,
      "step": 47
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.011172767655702232,
      "learning_rate": 4.726547403917746e-05,
      "loss": 0.0545,
      "step": 48
    },
    {
      "epoch": 1.53125,
      "grad_norm": 0.010438099239806306,
      "learning_rate": 4.582404670940021e-05,
      "loss": 0.0602,
      "step": 49
    },
    {
      "epoch": 1.5625,
      "grad_norm": 0.010052843717116185,
      "learning_rate": 4.437484833511499e-05,
      "loss": 0.0708,
      "step": 50
    },
    {
      "epoch": 1.59375,
      "grad_norm": 0.008957697137477696,
      "learning_rate": 4.29198125864363e-05,
      "loss": 0.0536,
      "step": 51
    },
    {
      "epoch": 1.625,
      "grad_norm": 0.008575506485091026,
      "learning_rate": 4.1460880922306367e-05,
      "loss": 0.0593,
      "step": 52
    },
    {
      "epoch": 1.65625,
      "grad_norm": 0.007526032951701283,
      "learning_rate": 4e-05,
      "loss": 0.0527,
      "step": 53
    },
    {
      "epoch": 1.6875,
      "grad_norm": 0.00799222837085378,
      "learning_rate": 3.853911907769365e-05,
      "loss": 0.0577,
      "step": 54
    },
    {
      "epoch": 1.71875,
      "grad_norm": 0.007560112609564607,
      "learning_rate": 3.7080187413563696e-05,
      "loss": 0.0494,
      "step": 55
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.008219390198929118,
      "learning_rate": 3.5625151664885036e-05,
      "loss": 0.0476,
      "step": 56
    },
    {
      "epoch": 1.78125,
      "grad_norm": 0.007634606221055081,
      "learning_rate": 3.417595329059982e-05,
      "loss": 0.0713,
      "step": 57
    },
    {
      "epoch": 1.8125,
      "grad_norm": 0.009308779174130916,
      "learning_rate": 3.2734525960822545e-05,
      "loss": 0.0499,
      "step": 58
    },
    {
      "epoch": 1.84375,
      "grad_norm": 0.01302955963204288,
      "learning_rate": 3.1302792976737726e-05,
      "loss": 0.0564,
      "step": 59
    },
    {
      "epoch": 1.875,
      "grad_norm": 0.00781383008144499,
      "learning_rate": 2.988266470433277e-05,
      "loss": 0.0469,
      "step": 60
    },
    {
      "epoch": 1.90625,
      "grad_norm": 0.010329373661438091,
      "learning_rate": 2.84760360253905e-05,
      "loss": 0.0551,
      "step": 61
    },
    {
      "epoch": 1.9375,
      "grad_norm": 0.010789577548416482,
      "learning_rate": 2.7084783809142164e-05,
      "loss": 0.0519,
      "step": 62
    },
    {
      "epoch": 1.96875,
      "grad_norm": 0.008393256765385788,
      "learning_rate": 2.5710764407954692e-05,
      "loss": 0.0574,
      "step": 63
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.012420419649732894,
      "learning_rate": 2.4355811180393767e-05,
      "loss": 0.063,
      "step": 64
    },
    {
      "epoch": 2.03125,
      "grad_norm": 0.00953342730783419,
      "learning_rate": 2.3021732044967405e-05,
      "loss": 0.0518,
      "step": 65
    },
    {
      "epoch": 2.0625,
      "grad_norm": 0.006266986802305626,
      "learning_rate": 2.171030706781446e-05,
      "loss": 0.0457,
      "step": 66
    },
    {
      "epoch": 2.09375,
      "grad_norm": 0.006805809975583902,
      "learning_rate": 2.042328608755659e-05,
      "loss": 0.0567,
      "step": 67
    },
    {
      "epoch": 2.125,
      "grad_norm": 0.009044351352133056,
      "learning_rate": 1.9162386380482795e-05,
      "loss": 0.0732,
      "step": 68
    },
    {
      "epoch": 2.15625,
      "grad_norm": 0.007550843275916376,
      "learning_rate": 1.7929290369182163e-05,
      "loss": 0.0619,
      "step": 69
    },
    {
      "epoch": 2.1875,
      "grad_norm": 0.007193561785770629,
      "learning_rate": 1.6725643377681893e-05,
      "loss": 0.055,
      "step": 70
    },
    {
      "epoch": 2.21875,
      "grad_norm": 0.010189583928887995,
      "learning_rate": 1.555305143608603e-05,
      "loss": 0.0495,
      "step": 71
    },
    {
      "epoch": 2.25,
      "grad_norm": 0.009025767645940064,
      "learning_rate": 1.4413079137644358e-05,
      "loss": 0.0431,
      "step": 72
    },
    {
      "epoch": 2.28125,
      "grad_norm": 0.007431093955637808,
      "learning_rate": 1.3307247551110427e-05,
      "loss": 0.0538,
      "step": 73
    },
    {
      "epoch": 2.3125,
      "grad_norm": 0.009100027360819555,
      "learning_rate": 1.2237032191174642e-05,
      "loss": 0.0551,
      "step": 74
    },
    {
      "epoch": 2.34375,
      "grad_norm": 0.007518728758196599,
      "learning_rate": 1.1203861049680174e-05,
      "loss": 0.0463,
      "step": 75
    },
    {
      "epoch": 2.375,
      "grad_norm": 0.007565354370617503,
      "learning_rate": 1.0209112690248726e-05,
      "loss": 0.0538,
      "step": 76
    },
    {
      "epoch": 2.40625,
      "grad_norm": 0.006992865554785582,
      "learning_rate": 9.254114408858714e-06,
      "loss": 0.0576,
      "step": 77
    },
    {
      "epoch": 2.4375,
      "grad_norm": 0.00936094228371784,
      "learning_rate": 8.34014046282984e-06,
      "loss": 0.0487,
      "step": 78
    },
    {
      "epoch": 2.46875,
      "grad_norm": 0.007448586675303377,
      "learning_rate": 7.468410370577386e-06,
      "loss": 0.052,
      "step": 79
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.00812554857117024,
      "learning_rate": 6.640087284404888e-06,
      "loss": 0.0448,
      "step": 80
    },
    {
      "epoch": 2.53125,
      "grad_norm": 0.0067932796539546475,
      "learning_rate": 5.856276438506143e-06,
      "loss": 0.0568,
      "step": 81
    },
    {
      "epoch": 2.5625,
      "grad_norm": 0.008477078414641319,
      "learning_rate": 5.118023674247692e-06,
      "loss": 0.0471,
      "step": 82
    },
    {
      "epoch": 2.59375,
      "grad_norm": 0.008294019299067519,
      "learning_rate": 4.426314044699247e-06,
      "loss": 0.0493,
      "step": 83
    },
    {
      "epoch": 2.625,
      "grad_norm": 0.007519169307600042,
      "learning_rate": 3.7820705002741353e-06,
      "loss": 0.0701,
      "step": 84
    },
    {
      "epoch": 2.65625,
      "grad_norm": 0.008489062420099343,
      "learning_rate": 3.1861526572336276e-06,
      "loss": 0.0514,
      "step": 85
    },
    {
      "epoch": 2.6875,
      "grad_norm": 0.009566111868708226,
      "learning_rate": 2.6393556506980834e-06,
      "loss": 0.052,
      "step": 86
    },
    {
      "epoch": 2.71875,
      "grad_norm": 0.008181379114013855,
      "learning_rate": 2.142409073695624e-06,
      "loss": 0.0596,
      "step": 87
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.007830248340854431,
      "learning_rate": 1.6959760036637662e-06,
      "loss": 0.0504,
      "step": 88
    },
    {
      "epoch": 2.78125,
      "grad_norm": 0.006242249688909198,
      "learning_rate": 1.3006521177029918e-06,
      "loss": 0.0387,
      "step": 89
    },
    {
      "epoch": 2.8125,
      "grad_norm": 0.009225967971772085,
      "learning_rate": 9.569648977629176e-07,
      "loss": 0.0491,
      "step": 90
    },
    {
      "epoch": 2.84375,
      "grad_norm": 0.008142330300088551,
      "learning_rate": 6.653729268213571e-07,
      "loss": 0.0485,
      "step": 91
    },
    {
      "epoch": 2.875,
      "grad_norm": 0.00971751151068613,
      "learning_rate": 4.2626527699558996e-07,
      "loss": 0.0512,
      "step": 92
    },
    {
      "epoch": 2.90625,
      "grad_norm": 0.006895106996204438,
      "learning_rate": 2.399609904021638e-07,
      "loss": 0.0491,
      "step": 93
    },
    {
      "epoch": 2.9375,
      "grad_norm": 0.006971470736899182,
      "learning_rate": 1.0670865345793425e-07,
      "loss": 0.0465,
      "step": 94
    },
    {
      "epoch": 2.96875,
      "grad_norm": 0.008102324859504847,
      "learning_rate": 2.668606519042438e-08,
      "loss": 0.0645,
      "step": 95
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.0071402049352925065,
      "learning_rate": 0.0,
      "loss": 0.0515,
      "step": 96
    },
    {
      "epoch": 3.0,
      "step": 96,
      "total_flos": 84358539640832.0,
      "train_loss": 0.08476965913238625,
      "train_runtime": 1498.8136,
      "train_samples_per_second": 0.5,
      "train_steps_per_second": 0.064
    }
  ],
  "logging_steps": 1,
  "max_steps": 96,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 84358539640832.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}