{
  "best_metric": 0.955902004454343,
  "best_model_checkpoint": "./nlu_finetuned_models/sst2/roberta-large_lr1e-05/checkpoint-26523",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 37890,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13196093956188967,
      "grad_norm": 14.684767723083496,
      "learning_rate": 2.198768689533861e-06,
      "loss": 0.6452,
      "step": 500
    },
    {
      "epoch": 0.26392187912377935,
      "grad_norm": 6.864231109619141,
      "learning_rate": 4.397537379067722e-06,
      "loss": 0.2856,
      "step": 1000
    },
    {
      "epoch": 0.39588281868566905,
      "grad_norm": 19.211210250854492,
      "learning_rate": 6.596306068601583e-06,
      "loss": 0.2445,
      "step": 1500
    },
    {
      "epoch": 0.5278437582475587,
      "grad_norm": 21.7692813873291,
      "learning_rate": 8.795074758135444e-06,
      "loss": 0.2459,
      "step": 2000
    },
    {
      "epoch": 0.6598046978094484,
      "grad_norm": 1.4644837379455566,
      "learning_rate": 9.936545372866128e-06,
      "loss": 0.2531,
      "step": 2500
    },
    {
      "epoch": 0.7917656373713381,
      "grad_norm": 18.3651065826416,
      "learning_rate": 9.796159029649596e-06,
      "loss": 0.2392,
      "step": 3000
    },
    {
      "epoch": 0.9237265769332278,
      "grad_norm": 9.527914047241211,
      "learning_rate": 9.655772686433064e-06,
      "loss": 0.2195,
      "step": 3500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9458054936896808,
      "eval_loss": 0.2150438129901886,
      "eval_runtime": 7.1689,
      "eval_samples_per_second": 939.47,
      "eval_steps_per_second": 58.726,
      "step": 3789
    },
    {
      "epoch": 1.0556875164951174,
      "grad_norm": 67.29890441894531,
      "learning_rate": 9.515386343216533e-06,
      "loss": 0.2065,
      "step": 4000
    },
    {
      "epoch": 1.187648456057007,
      "grad_norm": 3.1265335083007812,
      "learning_rate": 9.375000000000001e-06,
      "loss": 0.1957,
      "step": 4500
    },
    {
      "epoch": 1.3196093956188968,
      "grad_norm": 2.3976995944976807,
      "learning_rate": 9.234613656783469e-06,
      "loss": 0.1817,
      "step": 5000
    },
    {
      "epoch": 1.4515703351807865,
      "grad_norm": 0.1834949404001236,
      "learning_rate": 9.094227313566937e-06,
      "loss": 0.1855,
      "step": 5500
    },
    {
      "epoch": 1.5835312747426762,
      "grad_norm": 48.085628509521484,
      "learning_rate": 8.953840970350406e-06,
      "loss": 0.1865,
      "step": 6000
    },
    {
      "epoch": 1.7154922143045659,
      "grad_norm": 0.10328803956508636,
      "learning_rate": 8.813454627133872e-06,
      "loss": 0.1766,
      "step": 6500
    },
    {
      "epoch": 1.8474531538664554,
      "grad_norm": 6.290451526641846,
      "learning_rate": 8.67306828391734e-06,
      "loss": 0.1789,
      "step": 7000
    },
    {
      "epoch": 1.9794140934283453,
      "grad_norm": 0.1234852746129036,
      "learning_rate": 8.532681940700809e-06,
      "loss": 0.1849,
      "step": 7500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.948626577579807,
      "eval_loss": 0.16994288563728333,
      "eval_runtime": 7.3255,
      "eval_samples_per_second": 919.392,
      "eval_steps_per_second": 57.471,
      "step": 7578
    },
    {
      "epoch": 2.1113750329902348,
      "grad_norm": 0.3948022723197937,
      "learning_rate": 8.392295597484278e-06,
      "loss": 0.1325,
      "step": 8000
    },
    {
      "epoch": 2.2433359725521247,
      "grad_norm": 0.054762326180934906,
      "learning_rate": 8.251909254267747e-06,
      "loss": 0.1393,
      "step": 8500
    },
    {
      "epoch": 2.375296912114014,
      "grad_norm": 0.13094937801361084,
      "learning_rate": 8.111522911051213e-06,
      "loss": 0.131,
      "step": 9000
    },
    {
      "epoch": 2.507257851675904,
      "grad_norm": 1.9226784706115723,
      "learning_rate": 7.971136567834682e-06,
      "loss": 0.1454,
      "step": 9500
    },
    {
      "epoch": 2.6392187912377936,
      "grad_norm": 6.964934349060059,
      "learning_rate": 7.83075022461815e-06,
      "loss": 0.1458,
      "step": 10000
    },
    {
      "epoch": 2.771179730799683,
      "grad_norm": 12.109170913696289,
      "learning_rate": 7.690363881401618e-06,
      "loss": 0.1413,
      "step": 10500
    },
    {
      "epoch": 2.903140670361573,
      "grad_norm": 6.971311569213867,
      "learning_rate": 7.549977538185086e-06,
      "loss": 0.134,
      "step": 11000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9556050482553823,
      "eval_loss": 0.20318306982517242,
      "eval_runtime": 7.253,
      "eval_samples_per_second": 928.576,
      "eval_steps_per_second": 58.045,
      "step": 11367
    },
    {
      "epoch": 3.0351016099234625,
      "grad_norm": 58.1721076965332,
      "learning_rate": 7.409591194968554e-06,
      "loss": 0.1312,
      "step": 11500
    },
    {
      "epoch": 3.1670625494853524,
      "grad_norm": 0.04744189232587814,
      "learning_rate": 7.269204851752022e-06,
      "loss": 0.1035,
      "step": 12000
    },
    {
      "epoch": 3.299023489047242,
      "grad_norm": 0.29440391063690186,
      "learning_rate": 7.12881850853549e-06,
      "loss": 0.0867,
      "step": 12500
    },
    {
      "epoch": 3.4309844286091318,
      "grad_norm": 14.616955757141113,
      "learning_rate": 6.988432165318958e-06,
      "loss": 0.105,
      "step": 13000
    },
    {
      "epoch": 3.5629453681710213,
      "grad_norm": 2.5657553672790527,
      "learning_rate": 6.848045822102426e-06,
      "loss": 0.1108,
      "step": 13500
    },
    {
      "epoch": 3.694906307732911,
      "grad_norm": 0.30954834818840027,
      "learning_rate": 6.707659478885895e-06,
      "loss": 0.1045,
      "step": 14000
    },
    {
      "epoch": 3.8268672472948007,
      "grad_norm": 0.18087419867515564,
      "learning_rate": 6.567273135669363e-06,
      "loss": 0.1003,
      "step": 14500
    },
    {
      "epoch": 3.9588281868566906,
      "grad_norm": 0.2143256664276123,
      "learning_rate": 6.426886792452831e-06,
      "loss": 0.1015,
      "step": 15000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9553080920564216,
      "eval_loss": 0.2478303164243698,
      "eval_runtime": 7.168,
      "eval_samples_per_second": 939.587,
      "eval_steps_per_second": 58.733,
      "step": 15156
    },
    {
      "epoch": 4.0907891264185805,
      "grad_norm": 0.6192390322685242,
      "learning_rate": 6.2865004492362994e-06,
      "loss": 0.0898,
      "step": 15500
    },
    {
      "epoch": 4.2227500659804695,
      "grad_norm": 7.8285417556762695,
      "learning_rate": 6.146114106019767e-06,
      "loss": 0.0786,
      "step": 16000
    },
    {
      "epoch": 4.3547110055423595,
      "grad_norm": 3.172856330871582,
      "learning_rate": 6.005727762803235e-06,
      "loss": 0.073,
      "step": 16500
    },
    {
      "epoch": 4.486671945104249,
      "grad_norm": 0.03749631717801094,
      "learning_rate": 5.865341419586703e-06,
      "loss": 0.0808,
      "step": 17000
    },
    {
      "epoch": 4.618632884666139,
      "grad_norm": 0.01692149229347706,
      "learning_rate": 5.7249550763701715e-06,
      "loss": 0.0789,
      "step": 17500
    },
    {
      "epoch": 4.750593824228028,
      "grad_norm": 0.09162786602973938,
      "learning_rate": 5.584568733153639e-06,
      "loss": 0.0977,
      "step": 18000
    },
    {
      "epoch": 4.882554763789918,
      "grad_norm": 0.0481809638440609,
      "learning_rate": 5.444182389937107e-06,
      "loss": 0.0823,
      "step": 18500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9535263548626578,
      "eval_loss": 0.24863098561763763,
      "eval_runtime": 7.223,
      "eval_samples_per_second": 932.441,
      "eval_steps_per_second": 58.286,
      "step": 18945
    },
    {
      "epoch": 5.014515703351808,
      "grad_norm": 0.07389198243618011,
      "learning_rate": 5.303796046720575e-06,
      "loss": 0.0747,
      "step": 19000
    },
    {
      "epoch": 5.146476642913697,
      "grad_norm": 0.06816897541284561,
      "learning_rate": 5.163409703504043e-06,
      "loss": 0.0512,
      "step": 19500
    },
    {
      "epoch": 5.278437582475587,
      "grad_norm": 0.15339165925979614,
      "learning_rate": 5.023023360287511e-06,
      "loss": 0.0599,
      "step": 20000
    },
    {
      "epoch": 5.410398522037477,
      "grad_norm": 65.28050994873047,
      "learning_rate": 4.88263701707098e-06,
      "loss": 0.0733,
      "step": 20500
    },
    {
      "epoch": 5.542359461599366,
      "grad_norm": 6.615115642547607,
      "learning_rate": 4.7422506738544475e-06,
      "loss": 0.0625,
      "step": 21000
    },
    {
      "epoch": 5.674320401161256,
      "grad_norm": 40.05002212524414,
      "learning_rate": 4.601864330637916e-06,
      "loss": 0.0667,
      "step": 21500
    },
    {
      "epoch": 5.806281340723146,
      "grad_norm": 0.10902038961648941,
      "learning_rate": 4.461477987421384e-06,
      "loss": 0.0623,
      "step": 22000
    },
    {
      "epoch": 5.938242280285036,
      "grad_norm": 0.029639070853590965,
      "learning_rate": 4.321091644204852e-06,
      "loss": 0.062,
      "step": 22500
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.955011135857461,
      "eval_loss": 0.31579306721687317,
      "eval_runtime": 7.3084,
      "eval_samples_per_second": 921.548,
      "eval_steps_per_second": 57.605,
      "step": 22734
    },
    {
      "epoch": 6.070203219846925,
      "grad_norm": 0.03687451034784317,
      "learning_rate": 4.18070530098832e-06,
      "loss": 0.0539,
      "step": 23000
    },
    {
      "epoch": 6.202164159408815,
      "grad_norm": 0.11739882826805115,
      "learning_rate": 4.040318957771789e-06,
      "loss": 0.0446,
      "step": 23500
    },
    {
      "epoch": 6.334125098970705,
      "grad_norm": 0.21322381496429443,
      "learning_rate": 3.899932614555256e-06,
      "loss": 0.0528,
      "step": 24000
    },
    {
      "epoch": 6.466086038532595,
      "grad_norm": 1.3615431785583496,
      "learning_rate": 3.7595462713387242e-06,
      "loss": 0.0551,
      "step": 24500
    },
    {
      "epoch": 6.598046978094484,
      "grad_norm": 0.005917022004723549,
      "learning_rate": 3.6191599281221925e-06,
      "loss": 0.0423,
      "step": 25000
    },
    {
      "epoch": 6.730007917656374,
      "grad_norm": 0.0038304822519421577,
      "learning_rate": 3.4787735849056607e-06,
      "loss": 0.0445,
      "step": 25500
    },
    {
      "epoch": 6.8619688572182636,
      "grad_norm": 0.23638126254081726,
      "learning_rate": 3.338387241689129e-06,
      "loss": 0.056,
      "step": 26000
    },
    {
      "epoch": 6.993929796780153,
      "grad_norm": 0.015073439106345177,
      "learning_rate": 3.1980008984725967e-06,
      "loss": 0.0547,
      "step": 26500
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.955902004454343,
      "eval_loss": 0.30895689129829407,
      "eval_runtime": 7.2906,
      "eval_samples_per_second": 923.787,
      "eval_steps_per_second": 57.745,
      "step": 26523
    },
    {
      "epoch": 7.1258907363420425,
      "grad_norm": 0.04011049121618271,
      "learning_rate": 3.057614555256065e-06,
      "loss": 0.0366,
      "step": 27000
    },
    {
      "epoch": 7.257851675903932,
      "grad_norm": 0.04726456105709076,
      "learning_rate": 2.9172282120395328e-06,
      "loss": 0.0283,
      "step": 27500
    },
    {
      "epoch": 7.389812615465822,
      "grad_norm": 0.0173865407705307,
      "learning_rate": 2.776841868823001e-06,
      "loss": 0.0353,
      "step": 28000
    },
    {
      "epoch": 7.521773555027712,
      "grad_norm": 0.007793079596012831,
      "learning_rate": 2.6364555256064692e-06,
      "loss": 0.035,
      "step": 28500
    },
    {
      "epoch": 7.653734494589601,
      "grad_norm": 0.014837320894002914,
      "learning_rate": 2.496069182389937e-06,
      "loss": 0.0378,
      "step": 29000
    },
    {
      "epoch": 7.785695434151491,
      "grad_norm": 0.007654995657503605,
      "learning_rate": 2.3556828391734053e-06,
      "loss": 0.0425,
      "step": 29500
    },
    {
      "epoch": 7.917656373713381,
      "grad_norm": 0.2803705036640167,
      "learning_rate": 2.2152964959568735e-06,
      "loss": 0.039,
      "step": 30000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9536748329621381,
      "eval_loss": 0.3630528151988983,
      "eval_runtime": 7.1993,
      "eval_samples_per_second": 935.506,
      "eval_steps_per_second": 58.478,
      "step": 30312
    },
    {
      "epoch": 8.04961731327527,
      "grad_norm": 0.04258790612220764,
      "learning_rate": 2.0749101527403413e-06,
      "loss": 0.0369,
      "step": 30500
    },
    {
      "epoch": 8.181578252837161,
      "grad_norm": 0.008549284189939499,
      "learning_rate": 1.9345238095238096e-06,
      "loss": 0.0185,
      "step": 31000
    },
    {
      "epoch": 8.31353919239905,
      "grad_norm": 0.002880709245800972,
      "learning_rate": 1.7941374663072778e-06,
      "loss": 0.03,
      "step": 31500
    },
    {
      "epoch": 8.445500131960939,
      "grad_norm": 0.007983551360666752,
      "learning_rate": 1.6537511230907458e-06,
      "loss": 0.0264,
      "step": 32000
    },
    {
      "epoch": 8.57746107152283,
      "grad_norm": 0.005722792819142342,
      "learning_rate": 1.513364779874214e-06,
      "loss": 0.0354,
      "step": 32500
    },
    {
      "epoch": 8.709422011084719,
      "grad_norm": 0.006476383190602064,
      "learning_rate": 1.372978436657682e-06,
      "loss": 0.0292,
      "step": 33000
    },
    {
      "epoch": 8.841382950646608,
      "grad_norm": 0.029401870444417,
      "learning_rate": 1.2325920934411503e-06,
      "loss": 0.027,
      "step": 33500
    },
    {
      "epoch": 8.973343890208499,
      "grad_norm": 14.256837844848633,
      "learning_rate": 1.092205750224618e-06,
      "loss": 0.0234,
      "step": 34000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9542687453600593,
      "eval_loss": 0.38435572385787964,
      "eval_runtime": 7.155,
      "eval_samples_per_second": 941.299,
      "eval_steps_per_second": 58.84,
      "step": 34101
    },
    {
      "epoch": 9.105304829770388,
      "grad_norm": 0.0036281670909374952,
      "learning_rate": 9.518194070080863e-07,
      "loss": 0.017,
      "step": 34500
    },
    {
      "epoch": 9.237265769332277,
      "grad_norm": 0.009701136499643326,
      "learning_rate": 8.114330637915545e-07,
      "loss": 0.0247,
      "step": 35000
    },
    {
      "epoch": 9.369226708894168,
      "grad_norm": 0.07909628003835678,
      "learning_rate": 6.710467205750225e-07,
      "loss": 0.0197,
      "step": 35500
    },
    {
      "epoch": 9.501187648456057,
      "grad_norm": 0.00657819677144289,
      "learning_rate": 5.306603773584906e-07,
      "loss": 0.0176,
      "step": 36000
    },
    {
      "epoch": 9.633148588017947,
      "grad_norm": 0.009074714966118336,
      "learning_rate": 3.902740341419587e-07,
      "loss": 0.0241,
      "step": 36500
    },
    {
      "epoch": 9.765109527579837,
      "grad_norm": 0.026519833132624626,
      "learning_rate": 2.4988769092542675e-07,
      "loss": 0.0208,
      "step": 37000
    },
    {
      "epoch": 9.897070467141726,
      "grad_norm": 0.001519062090665102,
      "learning_rate": 1.095013477088949e-07,
      "loss": 0.0163,
      "step": 37500
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9535263548626578,
      "eval_loss": 0.3952884376049042,
      "eval_runtime": 7.2818,
      "eval_samples_per_second": 924.908,
      "eval_steps_per_second": 57.815,
      "step": 37890
    },
    {
      "epoch": 10.0,
      "step": 37890,
      "total_flos": 3.948673638307961e+16,
      "train_loss": 0.10002695248730424,
      "train_runtime": 3896.4569,
      "train_samples_per_second": 155.562,
      "train_steps_per_second": 9.724
    }
  ],
  "logging_steps": 500,
  "max_steps": 37890,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.948673638307961e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}