{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 12.30379746835443,
  "eval_steps": 500,
  "global_step": 117,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10126582278481013,
      "grad_norm": 7.680033192188056,
      "learning_rate": 8.333333333333333e-07,
      "loss": 0.876,
      "step": 1
    },
    {
      "epoch": 0.20253164556962025,
      "grad_norm": 7.605076623290318,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.8658,
      "step": 2
    },
    {
      "epoch": 0.3037974683544304,
      "grad_norm": 6.778648218751108,
      "learning_rate": 2.5e-06,
      "loss": 0.8177,
      "step": 3
    },
    {
      "epoch": 0.4050632911392405,
      "grad_norm": 6.509762542649743,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.7961,
      "step": 4
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 5.60547296324972,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.8073,
      "step": 5
    },
    {
      "epoch": 0.6075949367088608,
      "grad_norm": 2.9603418690542127,
      "learning_rate": 5e-06,
      "loss": 0.7123,
      "step": 6
    },
    {
      "epoch": 0.7088607594936709,
      "grad_norm": 2.6586350137017654,
      "learning_rate": 5.833333333333334e-06,
      "loss": 0.719,
      "step": 7
    },
    {
      "epoch": 0.810126582278481,
      "grad_norm": 3.628503370339582,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.6693,
      "step": 8
    },
    {
      "epoch": 0.9113924050632911,
      "grad_norm": 3.994437575777995,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.6902,
      "step": 9
    },
    {
      "epoch": 1.0506329113924051,
      "grad_norm": 3.8311013222873855,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.6371,
      "step": 10
    },
    {
      "epoch": 1.1518987341772151,
      "grad_norm": 4.300806163841766,
      "learning_rate": 9.166666666666666e-06,
      "loss": 0.6389,
      "step": 11
    },
    {
      "epoch": 1.2531645569620253,
      "grad_norm": 3.763694085806352,
      "learning_rate": 1e-05,
      "loss": 0.5804,
      "step": 12
    },
    {
      "epoch": 1.3544303797468356,
      "grad_norm": 2.9051031343691744,
      "learning_rate": 9.997762161417517e-06,
      "loss": 0.5677,
      "step": 13
    },
    {
      "epoch": 1.4556962025316456,
      "grad_norm": 1.9919406509125026,
      "learning_rate": 9.991050648838676e-06,
      "loss": 0.5447,
      "step": 14
    },
    {
      "epoch": 1.5569620253164556,
      "grad_norm": 2.214405561229308,
      "learning_rate": 9.979871469976197e-06,
      "loss": 0.5839,
      "step": 15
    },
    {
      "epoch": 1.6582278481012658,
      "grad_norm": 2.4360157076781483,
      "learning_rate": 9.964234631709188e-06,
      "loss": 0.5685,
      "step": 16
    },
    {
      "epoch": 1.759493670886076,
      "grad_norm": 2.266012553917941,
      "learning_rate": 9.944154131125643e-06,
      "loss": 0.5271,
      "step": 17
    },
    {
      "epoch": 1.8607594936708862,
      "grad_norm": 1.6843183104054464,
      "learning_rate": 9.91964794299315e-06,
      "loss": 0.4994,
      "step": 18
    },
    {
      "epoch": 1.9620253164556962,
      "grad_norm": 1.088950454916664,
      "learning_rate": 9.890738003669029e-06,
      "loss": 0.5071,
      "step": 19
    },
    {
      "epoch": 2.1012658227848102,
      "grad_norm": 1.200233336627422,
      "learning_rate": 9.857450191464337e-06,
      "loss": 0.4746,
      "step": 20
    },
    {
      "epoch": 2.2025316455696204,
      "grad_norm": 1.2637691080193945,
      "learning_rate": 9.819814303479268e-06,
      "loss": 0.4618,
      "step": 21
    },
    {
      "epoch": 2.3037974683544302,
      "grad_norm": 1.3769873879592567,
      "learning_rate": 9.777864028930705e-06,
      "loss": 0.4508,
      "step": 22
    },
    {
      "epoch": 2.4050632911392404,
      "grad_norm": 0.8682440711586037,
      "learning_rate": 9.731636918995821e-06,
      "loss": 0.3964,
      "step": 23
    },
    {
      "epoch": 2.5063291139240507,
      "grad_norm": 0.9509335750395728,
      "learning_rate": 9.681174353198687e-06,
      "loss": 0.4052,
      "step": 24
    },
    {
      "epoch": 2.607594936708861,
      "grad_norm": 1.1726269272210401,
      "learning_rate": 9.626521502369984e-06,
      "loss": 0.4393,
      "step": 25
    },
    {
      "epoch": 2.708860759493671,
      "grad_norm": 1.0363716677545973,
      "learning_rate": 9.567727288213005e-06,
      "loss": 0.4439,
      "step": 26
    },
    {
      "epoch": 2.810126582278481,
      "grad_norm": 0.8229007452516713,
      "learning_rate": 9.504844339512096e-06,
      "loss": 0.3915,
      "step": 27
    },
    {
      "epoch": 2.911392405063291,
      "grad_norm": 0.9246655473068639,
      "learning_rate": 9.437928945022772e-06,
      "loss": 0.391,
      "step": 28
    },
    {
      "epoch": 3.050632911392405,
      "grad_norm": 0.8702250576114761,
      "learning_rate": 9.36704100308565e-06,
      "loss": 0.4052,
      "step": 29
    },
    {
      "epoch": 3.151898734177215,
      "grad_norm": 0.8951505231845288,
      "learning_rate": 9.292243968009332e-06,
      "loss": 0.3665,
      "step": 30
    },
    {
      "epoch": 3.2531645569620253,
      "grad_norm": 0.8202167130655299,
      "learning_rate": 9.213604793270196e-06,
      "loss": 0.334,
      "step": 31
    },
    {
      "epoch": 3.3544303797468356,
      "grad_norm": 0.7812315273644796,
      "learning_rate": 9.131193871579975e-06,
      "loss": 0.321,
      "step": 32
    },
    {
      "epoch": 3.4556962025316453,
      "grad_norm": 0.9882162553157401,
      "learning_rate": 9.045084971874738e-06,
      "loss": 0.2901,
      "step": 33
    },
    {
      "epoch": 3.5569620253164556,
      "grad_norm": 0.8109137436623326,
      "learning_rate": 8.955355173281709e-06,
      "loss": 0.2626,
      "step": 34
    },
    {
      "epoch": 3.6582278481012658,
      "grad_norm": 0.7499397799268233,
      "learning_rate": 8.862084796122998e-06,
      "loss": 0.3374,
      "step": 35
    },
    {
      "epoch": 3.759493670886076,
      "grad_norm": 0.8565071413317179,
      "learning_rate": 8.765357330018056e-06,
      "loss": 0.2619,
      "step": 36
    },
    {
      "epoch": 3.8607594936708862,
      "grad_norm": 0.7524729309779767,
      "learning_rate": 8.665259359149132e-06,
      "loss": 0.2689,
      "step": 37
    },
    {
      "epoch": 3.962025316455696,
      "grad_norm": 0.7531983387104568,
      "learning_rate": 8.561880484756726e-06,
      "loss": 0.3241,
      "step": 38
    },
    {
      "epoch": 4.10126582278481,
      "grad_norm": 0.6868113396123745,
      "learning_rate": 8.455313244934324e-06,
      "loss": 0.2336,
      "step": 39
    },
    {
      "epoch": 4.2025316455696204,
      "grad_norm": 0.6794221220377648,
      "learning_rate": 8.345653031794292e-06,
      "loss": 0.2686,
      "step": 40
    },
    {
      "epoch": 4.30379746835443,
      "grad_norm": 0.7127200441976037,
      "learning_rate": 8.232998006078998e-06,
      "loss": 0.2302,
      "step": 41
    },
    {
      "epoch": 4.405063291139241,
      "grad_norm": 0.9320401873680159,
      "learning_rate": 8.117449009293668e-06,
      "loss": 0.2307,
      "step": 42
    },
    {
      "epoch": 4.506329113924051,
      "grad_norm": 0.741263544835177,
      "learning_rate": 7.99910947343957e-06,
      "loss": 0.2473,
      "step": 43
    },
    {
      "epoch": 4.6075949367088604,
      "grad_norm": 0.6812338139520304,
      "learning_rate": 7.87808532842837e-06,
      "loss": 0.1837,
      "step": 44
    },
    {
      "epoch": 4.708860759493671,
      "grad_norm": 0.7924153987851118,
      "learning_rate": 7.754484907260513e-06,
      "loss": 0.2433,
      "step": 45
    },
    {
      "epoch": 4.810126582278481,
      "grad_norm": 0.704653416654649,
      "learning_rate": 7.628418849052523e-06,
      "loss": 0.226,
      "step": 46
    },
    {
      "epoch": 4.911392405063291,
      "grad_norm": 0.7727681689903335,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.265,
      "step": 47
    },
    {
      "epoch": 5.050632911392405,
      "grad_norm": 0.6187802591301362,
      "learning_rate": 7.369343312364994e-06,
      "loss": 0.1834,
      "step": 48
    },
    {
      "epoch": 5.151898734177215,
      "grad_norm": 0.5876280442306084,
      "learning_rate": 7.236565741578163e-06,
      "loss": 0.1881,
      "step": 49
    },
    {
      "epoch": 5.253164556962025,
      "grad_norm": 0.672959788742261,
      "learning_rate": 7.101786141547829e-06,
      "loss": 0.1319,
      "step": 50
    },
    {
      "epoch": 5.3544303797468356,
      "grad_norm": 0.711584893214224,
      "learning_rate": 6.965125158269619e-06,
      "loss": 0.1769,
      "step": 51
    },
    {
      "epoch": 5.455696202531645,
      "grad_norm": 0.7178272865398647,
      "learning_rate": 6.8267051218319766e-06,
      "loss": 0.2074,
      "step": 52
    },
    {
      "epoch": 5.556962025316456,
      "grad_norm": 0.7830848244454021,
      "learning_rate": 6.686649936914151e-06,
      "loss": 0.2037,
      "step": 53
    },
    {
      "epoch": 5.658227848101266,
      "grad_norm": 0.6179328370518615,
      "learning_rate": 6.545084971874738e-06,
      "loss": 0.1574,
      "step": 54
    },
    {
      "epoch": 5.759493670886076,
      "grad_norm": 0.6670115628975251,
      "learning_rate": 6.402136946530014e-06,
      "loss": 0.161,
      "step": 55
    },
    {
      "epoch": 5.860759493670886,
      "grad_norm": 0.6785055355731117,
      "learning_rate": 6.257933818722544e-06,
      "loss": 0.1791,
      "step": 56
    },
    {
      "epoch": 5.962025316455696,
      "grad_norm": 0.6338228496221349,
      "learning_rate": 6.112604669781572e-06,
      "loss": 0.1689,
      "step": 57
    },
    {
      "epoch": 6.10126582278481,
      "grad_norm": 0.5540510682761942,
      "learning_rate": 5.9662795889777666e-06,
      "loss": 0.1528,
      "step": 58
    },
    {
      "epoch": 6.2025316455696204,
      "grad_norm": 0.5818482838873013,
      "learning_rate": 5.819089557075689e-06,
      "loss": 0.1912,
      "step": 59
    },
    {
      "epoch": 6.30379746835443,
      "grad_norm": 0.6163781450110954,
      "learning_rate": 5.671166329088278e-06,
      "loss": 0.1284,
      "step": 60
    },
    {
      "epoch": 6.405063291139241,
      "grad_norm": 0.677574989780631,
      "learning_rate": 5.522642316338268e-06,
      "loss": 0.1589,
      "step": 61
    },
    {
      "epoch": 6.506329113924051,
      "grad_norm": 0.6435283762104541,
      "learning_rate": 5.373650467932122e-06,
      "loss": 0.1194,
      "step": 62
    },
    {
      "epoch": 6.6075949367088604,
      "grad_norm": 0.6239949273645631,
      "learning_rate": 5.224324151752575e-06,
      "loss": 0.1045,
      "step": 63
    },
    {
      "epoch": 6.708860759493671,
      "grad_norm": 0.572687410995689,
      "learning_rate": 5.074797035076319e-06,
      "loss": 0.1636,
      "step": 64
    },
    {
      "epoch": 6.810126582278481,
      "grad_norm": 0.5449061137184503,
      "learning_rate": 4.9252029649236835e-06,
      "loss": 0.162,
      "step": 65
    },
    {
      "epoch": 6.911392405063291,
      "grad_norm": 0.5475880191654193,
      "learning_rate": 4.775675848247427e-06,
      "loss": 0.0917,
      "step": 66
    },
    {
      "epoch": 7.050632911392405,
      "grad_norm": 0.6385594435677409,
      "learning_rate": 4.626349532067879e-06,
      "loss": 0.0986,
      "step": 67
    },
    {
      "epoch": 7.151898734177215,
      "grad_norm": 0.5885768339308608,
      "learning_rate": 4.477357683661734e-06,
      "loss": 0.1327,
      "step": 68
    },
    {
      "epoch": 7.253164556962025,
      "grad_norm": 0.46414251730482564,
      "learning_rate": 4.3288336709117246e-06,
      "loss": 0.117,
      "step": 69
    },
    {
      "epoch": 7.3544303797468356,
      "grad_norm": 0.4596678374134551,
      "learning_rate": 4.180910442924312e-06,
      "loss": 0.1052,
      "step": 70
    },
    {
      "epoch": 7.455696202531645,
      "grad_norm": 0.6382991489091762,
      "learning_rate": 4.033720411022235e-06,
      "loss": 0.1396,
      "step": 71
    },
    {
      "epoch": 7.556962025316456,
      "grad_norm": 0.5234765183562552,
      "learning_rate": 3.887395330218429e-06,
      "loss": 0.0927,
      "step": 72
    },
    {
      "epoch": 7.658227848101266,
      "grad_norm": 0.5028097125423595,
      "learning_rate": 3.7420661812774577e-06,
      "loss": 0.1039,
      "step": 73
    },
    {
      "epoch": 7.759493670886076,
      "grad_norm": 0.5612361364934108,
      "learning_rate": 3.5978630534699873e-06,
      "loss": 0.1089,
      "step": 74
    },
    {
      "epoch": 7.860759493670886,
      "grad_norm": 0.5921818102172546,
      "learning_rate": 3.4549150281252635e-06,
      "loss": 0.0905,
      "step": 75
    },
    {
      "epoch": 7.962025316455696,
      "grad_norm": 0.7756744852324812,
      "learning_rate": 3.3133500630858507e-06,
      "loss": 0.1256,
      "step": 76
    },
    {
      "epoch": 8.10126582278481,
      "grad_norm": 0.46067091094544693,
      "learning_rate": 3.173294878168025e-06,
      "loss": 0.0695,
      "step": 77
    },
    {
      "epoch": 8.20253164556962,
      "grad_norm": 0.5331891345046547,
      "learning_rate": 3.0348748417303826e-06,
      "loss": 0.1007,
      "step": 78
    },
    {
      "epoch": 8.30379746835443,
      "grad_norm": 0.38056227705123474,
      "learning_rate": 2.8982138584521734e-06,
      "loss": 0.1152,
      "step": 79
    },
    {
      "epoch": 8.405063291139241,
      "grad_norm": 0.47749834030257926,
      "learning_rate": 2.7634342584218364e-06,
      "loss": 0.1434,
      "step": 80
    },
    {
      "epoch": 8.50632911392405,
      "grad_norm": 0.33181359449202125,
      "learning_rate": 2.6306566876350072e-06,
      "loss": 0.0601,
      "step": 81
    },
    {
      "epoch": 8.60759493670886,
      "grad_norm": 0.3692812970351156,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 0.0461,
      "step": 82
    },
    {
      "epoch": 8.708860759493671,
      "grad_norm": 0.5036817238632461,
      "learning_rate": 2.371581150947476e-06,
      "loss": 0.0994,
      "step": 83
    },
    {
      "epoch": 8.810126582278482,
      "grad_norm": 0.5549186139256994,
      "learning_rate": 2.245515092739488e-06,
      "loss": 0.0947,
      "step": 84
    },
    {
      "epoch": 8.91139240506329,
      "grad_norm": 0.5312869981574345,
      "learning_rate": 2.1219146715716332e-06,
      "loss": 0.0991,
      "step": 85
    },
    {
      "epoch": 9.050632911392405,
      "grad_norm": 0.3759462356183745,
      "learning_rate": 2.0008905265604316e-06,
      "loss": 0.1179,
      "step": 86
    },
    {
      "epoch": 9.151898734177216,
      "grad_norm": 0.33383214559138846,
      "learning_rate": 1.8825509907063328e-06,
      "loss": 0.0762,
      "step": 87
    },
    {
      "epoch": 9.253164556962025,
      "grad_norm": 0.26312712916539965,
      "learning_rate": 1.7670019939210025e-06,
      "loss": 0.0745,
      "step": 88
    },
    {
      "epoch": 9.354430379746836,
      "grad_norm": 0.30730572593714295,
      "learning_rate": 1.6543469682057105e-06,
      "loss": 0.0754,
      "step": 89
    },
    {
      "epoch": 9.455696202531646,
      "grad_norm": 0.31093551470161,
      "learning_rate": 1.544686755065677e-06,
      "loss": 0.0825,
      "step": 90
    },
    {
      "epoch": 9.556962025316455,
      "grad_norm": 0.3115908963835014,
      "learning_rate": 1.438119515243277e-06,
      "loss": 0.0856,
      "step": 91
    },
    {
      "epoch": 9.658227848101266,
      "grad_norm": 0.3345152880560803,
      "learning_rate": 1.3347406408508695e-06,
      "loss": 0.0652,
      "step": 92
    },
    {
      "epoch": 9.759493670886076,
      "grad_norm": 0.32769619009343287,
      "learning_rate": 1.234642669981946e-06,
      "loss": 0.0816,
      "step": 93
    },
    {
      "epoch": 9.860759493670885,
      "grad_norm": 0.4334283532490735,
      "learning_rate": 1.137915203877003e-06,
      "loss": 0.0869,
      "step": 94
    },
    {
      "epoch": 9.962025316455696,
      "grad_norm": 0.2877036407917396,
      "learning_rate": 1.044644826718295e-06,
      "loss": 0.077,
      "step": 95
    },
    {
      "epoch": 10.10126582278481,
      "grad_norm": 0.24167386699272236,
      "learning_rate": 9.549150281252633e-07,
      "loss": 0.0721,
      "step": 96
    },
    {
      "epoch": 10.20253164556962,
      "grad_norm": 0.19556579405259916,
      "learning_rate": 8.688061284200266e-07,
      "loss": 0.0383,
      "step": 97
    },
    {
      "epoch": 10.30379746835443,
      "grad_norm": 0.2552062802729188,
      "learning_rate": 7.863952067298042e-07,
      "loss": 0.0824,
      "step": 98
    },
    {
      "epoch": 10.405063291139241,
      "grad_norm": 0.2898550507651794,
      "learning_rate": 7.077560319906696e-07,
      "loss": 0.0874,
      "step": 99
    },
    {
      "epoch": 10.50632911392405,
      "grad_norm": 0.4524836615561104,
      "learning_rate": 6.329589969143518e-07,
      "loss": 0.0519,
      "step": 100
    },
    {
      "epoch": 10.60759493670886,
      "grad_norm": 0.2859763217293582,
      "learning_rate": 5.620710549772295e-07,
      "loss": 0.1038,
      "step": 101
    },
    {
      "epoch": 10.708860759493671,
      "grad_norm": 0.2740176489434147,
      "learning_rate": 4.951556604879049e-07,
      "loss": 0.0724,
      "step": 102
    },
    {
      "epoch": 10.810126582278482,
      "grad_norm": 0.28031594418514205,
      "learning_rate": 4.322727117869951e-07,
      "loss": 0.0913,
      "step": 103
    },
    {
      "epoch": 10.91139240506329,
      "grad_norm": 0.29825597093016865,
      "learning_rate": 3.734784976300165e-07,
      "loss": 0.0667,
      "step": 104
    },
    {
      "epoch": 11.050632911392405,
      "grad_norm": 0.26440161804086987,
      "learning_rate": 3.18825646801314e-07,
      "loss": 0.083,
      "step": 105
    },
    {
      "epoch": 11.151898734177216,
      "grad_norm": 0.2264550862330323,
      "learning_rate": 2.6836308100417874e-07,
      "loss": 0.0452,
      "step": 106
    },
    {
      "epoch": 11.253164556962025,
      "grad_norm": 0.20794688488652271,
      "learning_rate": 2.2213597106929608e-07,
      "loss": 0.0375,
      "step": 107
    },
    {
      "epoch": 11.354430379746836,
      "grad_norm": 0.28341702236760624,
      "learning_rate": 1.801856965207338e-07,
      "loss": 0.1221,
      "step": 108
    },
    {
      "epoch": 11.455696202531646,
      "grad_norm": 0.2432505235831095,
      "learning_rate": 1.4254980853566248e-07,
      "loss": 0.0469,
      "step": 109
    },
    {
      "epoch": 11.556962025316455,
      "grad_norm": 0.23671574535859555,
      "learning_rate": 1.0926199633097156e-07,
      "loss": 0.0635,
      "step": 110
    },
    {
      "epoch": 11.658227848101266,
      "grad_norm": 0.2882576880295542,
      "learning_rate": 8.035205700685167e-08,
      "loss": 0.1207,
      "step": 111
    },
    {
      "epoch": 11.759493670886076,
      "grad_norm": 0.21717624854035625,
      "learning_rate": 5.584586887435739e-08,
      "loss": 0.073,
      "step": 112
    },
    {
      "epoch": 11.860759493670885,
      "grad_norm": 0.25992837878278163,
      "learning_rate": 3.576536829081323e-08,
      "loss": 0.0677,
      "step": 113
    },
    {
      "epoch": 11.962025316455696,
      "grad_norm": 0.27299775911291413,
      "learning_rate": 2.012853002380466e-08,
      "loss": 0.0789,
      "step": 114
    },
    {
      "epoch": 12.10126582278481,
      "grad_norm": 0.22631594857909607,
      "learning_rate": 8.949351161324227e-09,
      "loss": 0.0663,
      "step": 115
    },
    {
      "epoch": 12.20253164556962,
      "grad_norm": 0.3003461315969225,
      "learning_rate": 2.237838582483387e-09,
      "loss": 0.0834,
      "step": 116
    },
    {
      "epoch": 12.30379746835443,
      "grad_norm": 0.21311917276522419,
      "learning_rate": 0.0,
      "loss": 0.0534,
      "step": 117
    },
    {
      "epoch": 12.30379746835443,
      "step": 117,
      "total_flos": 5.484038039850189e+16,
      "train_loss": 0.2492905456540931,
      "train_runtime": 3417.7179,
      "train_samples_per_second": 1.202,
      "train_steps_per_second": 0.034
    }
  ],
  "logging_steps": 1,
  "max_steps": 117,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 13,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.484038039850189e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}