{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 12.30379746835443,
  "eval_steps": 500,
  "global_step": 117,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10126582278481013,
      "grad_norm": 6.1036845704758855,
      "learning_rate": 8.333333333333333e-07,
      "loss": 0.7812,
      "step": 1
    },
    {
      "epoch": 0.20253164556962025,
      "grad_norm": 6.211931564456468,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.8133,
      "step": 2
    },
    {
      "epoch": 0.3037974683544304,
      "grad_norm": 5.790768954239727,
      "learning_rate": 2.5e-06,
      "loss": 0.7502,
      "step": 3
    },
    {
      "epoch": 0.4050632911392405,
      "grad_norm": 6.013932016991219,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.8266,
      "step": 4
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 4.390681107164878,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.7591,
      "step": 5
    },
    {
      "epoch": 0.6075949367088608,
      "grad_norm": 2.373460332124642,
      "learning_rate": 5e-06,
      "loss": 0.6998,
      "step": 6
    },
    {
      "epoch": 0.7088607594936709,
      "grad_norm": 2.049676391051526,
      "learning_rate": 5.833333333333334e-06,
      "loss": 0.6863,
      "step": 7
    },
    {
      "epoch": 0.810126582278481,
      "grad_norm": 3.6997148412349437,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.6978,
      "step": 8
    },
    {
      "epoch": 0.9113924050632911,
      "grad_norm": 3.5256032920681037,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.6739,
      "step": 9
    },
    {
      "epoch": 1.0506329113924051,
      "grad_norm": 6.2693023812601725,
      "learning_rate": 8.333333333333334e-06,
      "loss": 1.029,
      "step": 10
    },
    {
      "epoch": 1.1518987341772151,
      "grad_norm": 4.158105021945833,
      "learning_rate": 9.166666666666666e-06,
      "loss": 0.6936,
      "step": 11
    },
    {
      "epoch": 1.2531645569620253,
      "grad_norm": 3.520460730229181,
      "learning_rate": 1e-05,
      "loss": 0.6721,
      "step": 12
    },
    {
      "epoch": 1.3544303797468356,
      "grad_norm": 2.1407618632862238,
      "learning_rate": 9.997762161417517e-06,
      "loss": 0.6373,
      "step": 13
    },
    {
      "epoch": 1.4556962025316456,
      "grad_norm": 1.346669398553155,
      "learning_rate": 9.991050648838676e-06,
      "loss": 0.5256,
      "step": 14
    },
    {
      "epoch": 1.5569620253164556,
      "grad_norm": 1.83167377388535,
      "learning_rate": 9.979871469976197e-06,
      "loss": 0.6005,
      "step": 15
    },
    {
      "epoch": 1.6582278481012658,
      "grad_norm": 1.6292211200987232,
      "learning_rate": 9.964234631709188e-06,
      "loss": 0.6091,
      "step": 16
    },
    {
      "epoch": 1.759493670886076,
      "grad_norm": 1.0642263550063324,
      "learning_rate": 9.944154131125643e-06,
      "loss": 0.5857,
      "step": 17
    },
    {
      "epoch": 1.8607594936708862,
      "grad_norm": 0.8733482336806271,
      "learning_rate": 9.91964794299315e-06,
      "loss": 0.5395,
      "step": 18
    },
    {
      "epoch": 1.9620253164556962,
      "grad_norm": 1.5754667694130446,
      "learning_rate": 9.890738003669029e-06,
      "loss": 0.8551,
      "step": 19
    },
    {
      "epoch": 2.1012658227848102,
      "grad_norm": 1.026619296346142,
      "learning_rate": 9.857450191464337e-06,
      "loss": 0.5609,
      "step": 20
    },
    {
      "epoch": 2.2025316455696204,
      "grad_norm": 0.9301919561276994,
      "learning_rate": 9.819814303479268e-06,
      "loss": 0.5755,
      "step": 21
    },
    {
      "epoch": 2.3037974683544302,
      "grad_norm": 0.8790969152130147,
      "learning_rate": 9.777864028930705e-06,
      "loss": 0.5269,
      "step": 22
    },
    {
      "epoch": 2.4050632911392404,
      "grad_norm": 0.8909937598519407,
      "learning_rate": 9.731636918995821e-06,
      "loss": 0.5121,
      "step": 23
    },
    {
      "epoch": 2.5063291139240507,
      "grad_norm": 0.6561997149156265,
      "learning_rate": 9.681174353198687e-06,
      "loss": 0.4948,
      "step": 24
    },
    {
      "epoch": 2.607594936708861,
      "grad_norm": 0.6912117623767908,
      "learning_rate": 9.626521502369984e-06,
      "loss": 0.5176,
      "step": 25
    },
    {
      "epoch": 2.708860759493671,
      "grad_norm": 0.7437610952904947,
      "learning_rate": 9.567727288213005e-06,
      "loss": 0.4914,
      "step": 26
    },
    {
      "epoch": 2.810126582278481,
      "grad_norm": 0.7026861026072948,
      "learning_rate": 9.504844339512096e-06,
      "loss": 0.4788,
      "step": 27
    },
    {
      "epoch": 2.911392405063291,
      "grad_norm": 0.5809296387083083,
      "learning_rate": 9.437928945022772e-06,
      "loss": 0.4816,
      "step": 28
    },
    {
      "epoch": 3.050632911392405,
      "grad_norm": 1.0107791402531994,
      "learning_rate": 9.36704100308565e-06,
      "loss": 0.6794,
      "step": 29
    },
    {
      "epoch": 3.151898734177215,
      "grad_norm": 0.6424539355378337,
      "learning_rate": 9.292243968009332e-06,
      "loss": 0.4825,
      "step": 30
    },
    {
      "epoch": 3.2531645569620253,
      "grad_norm": 0.7542031622647473,
      "learning_rate": 9.213604793270196e-06,
      "loss": 0.4446,
      "step": 31
    },
    {
      "epoch": 3.3544303797468356,
      "grad_norm": 0.6714440923614907,
      "learning_rate": 9.131193871579975e-06,
      "loss": 0.4194,
      "step": 32
    },
    {
      "epoch": 3.4556962025316453,
      "grad_norm": 0.5821287458120061,
      "learning_rate": 9.045084971874738e-06,
      "loss": 0.4674,
      "step": 33
    },
    {
      "epoch": 3.5569620253164556,
      "grad_norm": 0.6131543708192907,
      "learning_rate": 8.955355173281709e-06,
      "loss": 0.4467,
      "step": 34
    },
    {
      "epoch": 3.6582278481012658,
      "grad_norm": 0.6330509230082857,
      "learning_rate": 8.862084796122998e-06,
      "loss": 0.4316,
      "step": 35
    },
    {
      "epoch": 3.759493670886076,
      "grad_norm": 0.6271528911438414,
      "learning_rate": 8.765357330018056e-06,
      "loss": 0.435,
      "step": 36
    },
    {
      "epoch": 3.8607594936708862,
      "grad_norm": 0.5302978616627516,
      "learning_rate": 8.665259359149132e-06,
      "loss": 0.3998,
      "step": 37
    },
    {
      "epoch": 3.962025316455696,
      "grad_norm": 0.8739795045555382,
      "learning_rate": 8.561880484756726e-06,
      "loss": 0.684,
      "step": 38
    },
    {
      "epoch": 4.10126582278481,
      "grad_norm": 0.5734268578269217,
      "learning_rate": 8.455313244934324e-06,
      "loss": 0.3893,
      "step": 39
    },
    {
      "epoch": 4.2025316455696204,
      "grad_norm": 0.5779200174086098,
      "learning_rate": 8.345653031794292e-06,
      "loss": 0.3813,
      "step": 40
    },
    {
      "epoch": 4.30379746835443,
      "grad_norm": 0.5499093804843386,
      "learning_rate": 8.232998006078998e-06,
      "loss": 0.4055,
      "step": 41
    },
    {
      "epoch": 4.405063291139241,
      "grad_norm": 0.5265167844202177,
      "learning_rate": 8.117449009293668e-06,
      "loss": 0.3812,
      "step": 42
    },
    {
      "epoch": 4.506329113924051,
      "grad_norm": 0.5400291629686085,
      "learning_rate": 7.99910947343957e-06,
      "loss": 0.3753,
      "step": 43
    },
    {
      "epoch": 4.6075949367088604,
      "grad_norm": 0.5106433176681838,
      "learning_rate": 7.87808532842837e-06,
      "loss": 0.375,
      "step": 44
    },
    {
      "epoch": 4.708860759493671,
      "grad_norm": 0.4566746058161919,
      "learning_rate": 7.754484907260513e-06,
      "loss": 0.3629,
      "step": 45
    },
    {
      "epoch": 4.810126582278481,
      "grad_norm": 0.4632133044636238,
      "learning_rate": 7.628418849052523e-06,
      "loss": 0.382,
      "step": 46
    },
    {
      "epoch": 4.911392405063291,
      "grad_norm": 0.526047149406831,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.3743,
      "step": 47
    },
    {
      "epoch": 5.050632911392405,
      "grad_norm": 0.8126085232325019,
      "learning_rate": 7.369343312364994e-06,
      "loss": 0.5711,
      "step": 48
    },
    {
      "epoch": 5.151898734177215,
      "grad_norm": 0.4455087514591805,
      "learning_rate": 7.236565741578163e-06,
      "loss": 0.3194,
      "step": 49
    },
    {
      "epoch": 5.253164556962025,
      "grad_norm": 0.5287858156135754,
      "learning_rate": 7.101786141547829e-06,
      "loss": 0.339,
      "step": 50
    },
    {
      "epoch": 5.3544303797468356,
      "grad_norm": 0.5187534527789833,
      "learning_rate": 6.965125158269619e-06,
      "loss": 0.3151,
      "step": 51
    },
    {
      "epoch": 5.455696202531645,
      "grad_norm": 0.48548565527293497,
      "learning_rate": 6.8267051218319766e-06,
      "loss": 0.3196,
      "step": 52
    },
    {
      "epoch": 5.556962025316456,
      "grad_norm": 0.575974481147825,
      "learning_rate": 6.686649936914151e-06,
      "loss": 0.3338,
      "step": 53
    },
    {
      "epoch": 5.658227848101266,
      "grad_norm": 0.5546881944998475,
      "learning_rate": 6.545084971874738e-06,
      "loss": 0.3117,
      "step": 54
    },
    {
      "epoch": 5.759493670886076,
      "grad_norm": 0.4307213262189174,
      "learning_rate": 6.402136946530014e-06,
      "loss": 0.3083,
      "step": 55
    },
    {
      "epoch": 5.860759493670886,
      "grad_norm": 0.44082398239007436,
      "learning_rate": 6.257933818722544e-06,
      "loss": 0.3231,
      "step": 56
    },
    {
      "epoch": 5.962025316455696,
      "grad_norm": 0.8402823514466229,
      "learning_rate": 6.112604669781572e-06,
      "loss": 0.4957,
      "step": 57
    },
    {
      "epoch": 6.10126582278481,
      "grad_norm": 0.4874816730227165,
      "learning_rate": 5.9662795889777666e-06,
      "loss": 0.2668,
      "step": 58
    },
    {
      "epoch": 6.2025316455696204,
      "grad_norm": 0.5376664005455791,
      "learning_rate": 5.819089557075689e-06,
      "loss": 0.2668,
      "step": 59
    },
    {
      "epoch": 6.30379746835443,
      "grad_norm": 0.5419620482445535,
      "learning_rate": 5.671166329088278e-06,
      "loss": 0.2833,
      "step": 60
    },
    {
      "epoch": 6.405063291139241,
      "grad_norm": 0.5257415036659032,
      "learning_rate": 5.522642316338268e-06,
      "loss": 0.2675,
      "step": 61
    },
    {
      "epoch": 6.506329113924051,
      "grad_norm": 0.5024353585089631,
      "learning_rate": 5.373650467932122e-06,
      "loss": 0.2761,
      "step": 62
    },
    {
      "epoch": 6.6075949367088604,
      "grad_norm": 0.48228509714201534,
      "learning_rate": 5.224324151752575e-06,
      "loss": 0.2665,
      "step": 63
    },
    {
      "epoch": 6.708860759493671,
      "grad_norm": 0.562477591851496,
      "learning_rate": 5.074797035076319e-06,
      "loss": 0.2755,
      "step": 64
    },
    {
      "epoch": 6.810126582278481,
      "grad_norm": 0.5102782233861666,
      "learning_rate": 4.9252029649236835e-06,
      "loss": 0.2754,
      "step": 65
    },
    {
      "epoch": 6.911392405063291,
      "grad_norm": 0.49093407572541736,
      "learning_rate": 4.775675848247427e-06,
      "loss": 0.2573,
      "step": 66
    },
    {
      "epoch": 7.050632911392405,
      "grad_norm": 0.7479428740498759,
      "learning_rate": 4.626349532067879e-06,
      "loss": 0.3979,
      "step": 67
    },
    {
      "epoch": 7.151898734177215,
      "grad_norm": 0.5201056231969392,
      "learning_rate": 4.477357683661734e-06,
      "loss": 0.22,
      "step": 68
    },
    {
      "epoch": 7.253164556962025,
      "grad_norm": 0.5151144705091305,
      "learning_rate": 4.3288336709117246e-06,
      "loss": 0.2389,
      "step": 69
    },
    {
      "epoch": 7.3544303797468356,
      "grad_norm": 0.49602585283187683,
      "learning_rate": 4.180910442924312e-06,
      "loss": 0.2457,
      "step": 70
    },
    {
      "epoch": 7.455696202531645,
      "grad_norm": 0.6035258985510871,
      "learning_rate": 4.033720411022235e-06,
      "loss": 0.2281,
      "step": 71
    },
    {
      "epoch": 7.556962025316456,
      "grad_norm": 0.506215143501012,
      "learning_rate": 3.887395330218429e-06,
      "loss": 0.2147,
      "step": 72
    },
    {
      "epoch": 7.658227848101266,
      "grad_norm": 0.4965428798020858,
      "learning_rate": 3.7420661812774577e-06,
      "loss": 0.2455,
      "step": 73
    },
    {
      "epoch": 7.759493670886076,
      "grad_norm": 0.44190339502547793,
      "learning_rate": 3.5978630534699873e-06,
      "loss": 0.2018,
      "step": 74
    },
    {
      "epoch": 7.860759493670886,
      "grad_norm": 0.45547876258399317,
      "learning_rate": 3.4549150281252635e-06,
      "loss": 0.1986,
      "step": 75
    },
    {
      "epoch": 7.962025316455696,
      "grad_norm": 0.7161587795863175,
      "learning_rate": 3.3133500630858507e-06,
      "loss": 0.3547,
      "step": 76
    },
    {
      "epoch": 8.10126582278481,
      "grad_norm": 0.42601775992677765,
      "learning_rate": 3.173294878168025e-06,
      "loss": 0.2076,
      "step": 77
    },
    {
      "epoch": 8.20253164556962,
      "grad_norm": 0.4944626930087208,
      "learning_rate": 3.0348748417303826e-06,
      "loss": 0.1929,
      "step": 78
    },
    {
      "epoch": 8.30379746835443,
      "grad_norm": 0.5728528367532345,
      "learning_rate": 2.8982138584521734e-06,
      "loss": 0.2035,
      "step": 79
    },
    {
      "epoch": 8.405063291139241,
      "grad_norm": 0.4362550178442035,
      "learning_rate": 2.7634342584218364e-06,
      "loss": 0.1989,
      "step": 80
    },
    {
      "epoch": 8.50632911392405,
      "grad_norm": 0.45868257290328096,
      "learning_rate": 2.6306566876350072e-06,
      "loss": 0.1949,
      "step": 81
    },
    {
      "epoch": 8.60759493670886,
      "grad_norm": 0.4637782410909022,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 0.1844,
      "step": 82
    },
    {
      "epoch": 8.708860759493671,
      "grad_norm": 0.44510439455018846,
      "learning_rate": 2.371581150947476e-06,
      "loss": 0.1943,
      "step": 83
    },
    {
      "epoch": 8.810126582278482,
      "grad_norm": 0.4461857643014094,
      "learning_rate": 2.245515092739488e-06,
      "loss": 0.1945,
      "step": 84
    },
    {
      "epoch": 8.91139240506329,
      "grad_norm": 0.4082427904834379,
      "learning_rate": 2.1219146715716332e-06,
      "loss": 0.1673,
      "step": 85
    },
    {
      "epoch": 9.050632911392405,
      "grad_norm": 0.6395003552555746,
      "learning_rate": 2.0008905265604316e-06,
      "loss": 0.2712,
      "step": 86
    },
    {
      "epoch": 9.151898734177216,
      "grad_norm": 0.47692878154869267,
      "learning_rate": 1.8825509907063328e-06,
      "loss": 0.1623,
      "step": 87
    },
    {
      "epoch": 9.253164556962025,
      "grad_norm": 0.43643323518964133,
      "learning_rate": 1.7670019939210025e-06,
      "loss": 0.1694,
      "step": 88
    },
    {
      "epoch": 9.354430379746836,
      "grad_norm": 0.3903382248014779,
      "learning_rate": 1.6543469682057105e-06,
      "loss": 0.1488,
      "step": 89
    },
    {
      "epoch": 9.455696202531646,
      "grad_norm": 0.47851943237177375,
      "learning_rate": 1.544686755065677e-06,
      "loss": 0.1789,
      "step": 90
    },
    {
      "epoch": 9.556962025316455,
      "grad_norm": 0.4548987582200087,
      "learning_rate": 1.438119515243277e-06,
      "loss": 0.1623,
      "step": 91
    },
    {
      "epoch": 9.658227848101266,
      "grad_norm": 0.4425566552741696,
      "learning_rate": 1.3347406408508695e-06,
      "loss": 0.1663,
      "step": 92
    },
    {
      "epoch": 9.759493670886076,
      "grad_norm": 0.42661164836114646,
      "learning_rate": 1.234642669981946e-06,
      "loss": 0.1599,
      "step": 93
    },
    {
      "epoch": 9.860759493670885,
      "grad_norm": 0.41370944162867496,
      "learning_rate": 1.137915203877003e-06,
      "loss": 0.1684,
      "step": 94
    },
    {
      "epoch": 9.962025316455696,
      "grad_norm": 0.7267937888823227,
      "learning_rate": 1.044644826718295e-06,
      "loss": 0.2643,
      "step": 95
    },
    {
      "epoch": 10.10126582278481,
      "grad_norm": 0.45995032284252424,
      "learning_rate": 9.549150281252633e-07,
      "loss": 0.1531,
      "step": 96
    },
    {
      "epoch": 10.20253164556962,
      "grad_norm": 0.4406434429607988,
      "learning_rate": 8.688061284200266e-07,
      "loss": 0.1605,
      "step": 97
    },
    {
      "epoch": 10.30379746835443,
      "grad_norm": 0.42714155253941855,
      "learning_rate": 7.863952067298042e-07,
      "loss": 0.1623,
      "step": 98
    },
    {
      "epoch": 10.405063291139241,
      "grad_norm": 0.39220634705008045,
      "learning_rate": 7.077560319906696e-07,
      "loss": 0.1441,
      "step": 99
    },
    {
      "epoch": 10.50632911392405,
      "grad_norm": 0.47555340870435764,
      "learning_rate": 6.329589969143518e-07,
      "loss": 0.1598,
      "step": 100
    },
    {
      "epoch": 10.60759493670886,
      "grad_norm": 0.5228165541628115,
      "learning_rate": 5.620710549772295e-07,
      "loss": 0.1628,
      "step": 101
    },
    {
      "epoch": 10.708860759493671,
      "grad_norm": 0.43073214177365693,
      "learning_rate": 4.951556604879049e-07,
      "loss": 0.1519,
      "step": 102
    },
    {
      "epoch": 10.810126582278482,
      "grad_norm": 0.37897080903635194,
      "learning_rate": 4.322727117869951e-07,
      "loss": 0.1428,
      "step": 103
    },
    {
      "epoch": 10.91139240506329,
      "grad_norm": 0.371960658859043,
      "learning_rate": 3.734784976300165e-07,
      "loss": 0.1406,
      "step": 104
    },
    {
      "epoch": 11.050632911392405,
      "grad_norm": 0.6022907841646971,
      "learning_rate": 3.18825646801314e-07,
      "loss": 0.2183,
      "step": 105
    },
    {
      "epoch": 11.151898734177216,
      "grad_norm": 0.38744645851536486,
      "learning_rate": 2.6836308100417874e-07,
      "loss": 0.1563,
      "step": 106
    },
    {
      "epoch": 11.253164556962025,
      "grad_norm": 0.3695125100205556,
      "learning_rate": 2.2213597106929608e-07,
      "loss": 0.142,
      "step": 107
    },
    {
      "epoch": 11.354430379746836,
      "grad_norm": 0.40210728163448434,
      "learning_rate": 1.801856965207338e-07,
      "loss": 0.1365,
      "step": 108
    },
    {
      "epoch": 11.455696202531646,
      "grad_norm": 0.4001408002590834,
      "learning_rate": 1.4254980853566248e-07,
      "loss": 0.1482,
      "step": 109
    },
    {
      "epoch": 11.556962025316455,
      "grad_norm": 0.3655046893382892,
      "learning_rate": 1.0926199633097156e-07,
      "loss": 0.1312,
      "step": 110
    },
    {
      "epoch": 11.658227848101266,
      "grad_norm": 0.3771122612835669,
      "learning_rate": 8.035205700685167e-08,
      "loss": 0.1458,
      "step": 111
    },
    {
      "epoch": 11.759493670886076,
      "grad_norm": 0.3478381459734836,
      "learning_rate": 5.584586887435739e-08,
      "loss": 0.1662,
      "step": 112
    },
    {
      "epoch": 11.860759493670885,
      "grad_norm": 0.34843621011773535,
      "learning_rate": 3.576536829081323e-08,
      "loss": 0.1446,
      "step": 113
    },
    {
      "epoch": 11.962025316455696,
      "grad_norm": 0.596966030465398,
      "learning_rate": 2.012853002380466e-08,
      "loss": 0.2275,
      "step": 114
    },
    {
      "epoch": 12.10126582278481,
      "grad_norm": 0.3605724857145422,
      "learning_rate": 8.949351161324227e-09,
      "loss": 0.1392,
      "step": 115
    },
    {
      "epoch": 12.20253164556962,
      "grad_norm": 0.36064053210086594,
      "learning_rate": 2.237838582483387e-09,
      "loss": 0.1442,
      "step": 116
    },
    {
      "epoch": 12.30379746835443,
      "grad_norm": 0.3551070685344023,
      "learning_rate": 0.0,
      "loss": 0.1526,
      "step": 117
    },
    {
      "epoch": 12.30379746835443,
      "step": 117,
      "total_flos": 1.4430009490721997e+17,
      "train_loss": 0.3592390182436022,
      "train_runtime": 4910.5215,
      "train_samples_per_second": 0.837,
      "train_steps_per_second": 0.024
    }
  ],
  "logging_steps": 1,
  "max_steps": 117,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 13,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4430009490721997e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}