{
  "best_metric": 0.9517446176688938,
  "best_model_checkpoint": "./nlu_finetuned_models/sst2/roberta-base_lr1e-05/checkpoint-18945",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 37890,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13196093956188967,
      "grad_norm": 16.203157424926758,
      "learning_rate": 2.198768689533861e-06,
      "loss": 0.6535,
      "step": 500
    },
    {
      "epoch": 0.26392187912377935,
      "grad_norm": 39.97472381591797,
      "learning_rate": 4.397537379067722e-06,
      "loss": 0.3243,
      "step": 1000
    },
    {
      "epoch": 0.39588281868566905,
      "grad_norm": 32.75914001464844,
      "learning_rate": 6.596306068601583e-06,
      "loss": 0.2817,
      "step": 1500
    },
    {
      "epoch": 0.5278437582475587,
      "grad_norm": 18.060081481933594,
      "learning_rate": 8.795074758135444e-06,
      "loss": 0.2591,
      "step": 2000
    },
    {
      "epoch": 0.6598046978094484,
      "grad_norm": 6.257637977600098,
      "learning_rate": 9.936545372866128e-06,
      "loss": 0.2599,
      "step": 2500
    },
    {
      "epoch": 0.7917656373713381,
      "grad_norm": 27.890222549438477,
      "learning_rate": 9.796159029649596e-06,
      "loss": 0.2496,
      "step": 3000
    },
    {
      "epoch": 0.9237265769332278,
      "grad_norm": 29.086610794067383,
      "learning_rate": 9.655772686433064e-06,
      "loss": 0.244,
      "step": 3500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9365998515219005,
      "eval_loss": 0.19152739644050598,
      "eval_runtime": 3.2031,
      "eval_samples_per_second": 2102.672,
      "eval_steps_per_second": 131.436,
      "step": 3789
    },
    {
      "epoch": 1.0556875164951174,
      "grad_norm": 32.525482177734375,
      "learning_rate": 9.515386343216533e-06,
      "loss": 0.2199,
      "step": 4000
    },
    {
      "epoch": 1.187648456057007,
      "grad_norm": 39.73225402832031,
      "learning_rate": 9.375000000000001e-06,
      "loss": 0.2081,
      "step": 4500
    },
    {
      "epoch": 1.3196093956188968,
      "grad_norm": 18.29433250427246,
      "learning_rate": 9.234613656783469e-06,
      "loss": 0.1982,
      "step": 5000
    },
    {
      "epoch": 1.4515703351807865,
      "grad_norm": 0.6866791248321533,
      "learning_rate": 9.094227313566937e-06,
      "loss": 0.2094,
      "step": 5500
    },
    {
      "epoch": 1.5835312747426762,
      "grad_norm": 2.060551166534424,
      "learning_rate": 8.953840970350406e-06,
      "loss": 0.2017,
      "step": 6000
    },
    {
      "epoch": 1.7154922143045659,
      "grad_norm": 26.714765548706055,
      "learning_rate": 8.813454627133872e-06,
      "loss": 0.1891,
      "step": 6500
    },
    {
      "epoch": 1.8474531538664554,
      "grad_norm": 6.763668060302734,
      "learning_rate": 8.67306828391734e-06,
      "loss": 0.2028,
      "step": 7000
    },
    {
      "epoch": 1.9794140934283453,
      "grad_norm": 0.7139686346054077,
      "learning_rate": 8.532681940700809e-06,
      "loss": 0.1984,
      "step": 7500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9406087602078693,
      "eval_loss": 0.22181583940982819,
      "eval_runtime": 3.2002,
      "eval_samples_per_second": 2104.543,
      "eval_steps_per_second": 131.553,
      "step": 7578
    },
    {
      "epoch": 2.1113750329902348,
      "grad_norm": 1.851331353187561,
      "learning_rate": 8.392295597484278e-06,
      "loss": 0.1615,
      "step": 8000
    },
    {
      "epoch": 2.2433359725521247,
      "grad_norm": 0.1076214462518692,
      "learning_rate": 8.251909254267747e-06,
      "loss": 0.1622,
      "step": 8500
    },
    {
      "epoch": 2.375296912114014,
      "grad_norm": 3.893419027328491,
      "learning_rate": 8.111522911051213e-06,
      "loss": 0.1506,
      "step": 9000
    },
    {
      "epoch": 2.507257851675904,
      "grad_norm": 0.19271089136600494,
      "learning_rate": 7.971136567834682e-06,
      "loss": 0.1713,
      "step": 9500
    },
    {
      "epoch": 2.6392187912377936,
      "grad_norm": 0.5250680446624756,
      "learning_rate": 7.83075022461815e-06,
      "loss": 0.1591,
      "step": 10000
    },
    {
      "epoch": 2.771179730799683,
      "grad_norm": 72.66580963134766,
      "learning_rate": 7.690363881401618e-06,
      "loss": 0.1688,
      "step": 10500
    },
    {
      "epoch": 2.903140670361573,
      "grad_norm": 12.931042671203613,
      "learning_rate": 7.549977538185086e-06,
      "loss": 0.1595,
      "step": 11000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9492204899777282,
      "eval_loss": 0.22260741889476776,
      "eval_runtime": 3.2317,
      "eval_samples_per_second": 2084.074,
      "eval_steps_per_second": 130.274,
      "step": 11367
    },
    {
      "epoch": 3.0351016099234625,
      "grad_norm": 0.4131239056587219,
      "learning_rate": 7.409591194968554e-06,
      "loss": 0.142,
      "step": 11500
    },
    {
      "epoch": 3.1670625494853524,
      "grad_norm": 18.098291397094727,
      "learning_rate": 7.269204851752022e-06,
      "loss": 0.1164,
      "step": 12000
    },
    {
      "epoch": 3.299023489047242,
      "grad_norm": 0.05766018107533455,
      "learning_rate": 7.12881850853549e-06,
      "loss": 0.1209,
      "step": 12500
    },
    {
      "epoch": 3.4309844286091318,
      "grad_norm": 11.65517807006836,
      "learning_rate": 6.988432165318958e-06,
      "loss": 0.131,
      "step": 13000
    },
    {
      "epoch": 3.5629453681710213,
      "grad_norm": 104.57723999023438,
      "learning_rate": 6.848045822102426e-06,
      "loss": 0.1415,
      "step": 13500
    },
    {
      "epoch": 3.694906307732911,
      "grad_norm": 0.9235928654670715,
      "learning_rate": 6.707659478885895e-06,
      "loss": 0.1359,
      "step": 14000
    },
    {
      "epoch": 3.8268672472948007,
      "grad_norm": 32.136817932128906,
      "learning_rate": 6.567273135669363e-06,
      "loss": 0.1356,
      "step": 14500
    },
    {
      "epoch": 3.9588281868566906,
      "grad_norm": 8.762619018554688,
      "learning_rate": 6.426886792452831e-06,
      "loss": 0.1225,
      "step": 15000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9501113585746103,
      "eval_loss": 0.24848563969135284,
      "eval_runtime": 3.2316,
      "eval_samples_per_second": 2084.138,
      "eval_steps_per_second": 130.278,
      "step": 15156
    },
    {
      "epoch": 4.0907891264185805,
      "grad_norm": 47.4286994934082,
      "learning_rate": 6.2865004492362994e-06,
      "loss": 0.1143,
      "step": 15500
    },
    {
      "epoch": 4.2227500659804695,
      "grad_norm": 17.368412017822266,
      "learning_rate": 6.146114106019767e-06,
      "loss": 0.1018,
      "step": 16000
    },
    {
      "epoch": 4.3547110055423595,
      "grad_norm": 8.772218704223633,
      "learning_rate": 6.005727762803235e-06,
      "loss": 0.1123,
      "step": 16500
    },
    {
      "epoch": 4.486671945104249,
      "grad_norm": 21.83694839477539,
      "learning_rate": 5.865341419586703e-06,
      "loss": 0.1039,
      "step": 17000
    },
    {
      "epoch": 4.618632884666139,
      "grad_norm": 24.351490020751953,
      "learning_rate": 5.7249550763701715e-06,
      "loss": 0.1156,
      "step": 17500
    },
    {
      "epoch": 4.750593824228028,
      "grad_norm": 0.13713432848453522,
      "learning_rate": 5.584568733153639e-06,
      "loss": 0.1225,
      "step": 18000
    },
    {
      "epoch": 4.882554763789918,
      "grad_norm": 0.23665529489517212,
      "learning_rate": 5.444182389937107e-06,
      "loss": 0.1061,
      "step": 18500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9517446176688938,
      "eval_loss": 0.2689071297645569,
      "eval_runtime": 3.2131,
      "eval_samples_per_second": 2096.114,
      "eval_steps_per_second": 131.027,
      "step": 18945
    },
    {
      "epoch": 5.014515703351808,
      "grad_norm": 0.020952098071575165,
      "learning_rate": 5.303796046720575e-06,
      "loss": 0.0945,
      "step": 19000
    },
    {
      "epoch": 5.146476642913697,
      "grad_norm": 0.08160785585641861,
      "learning_rate": 5.163409703504043e-06,
      "loss": 0.0741,
      "step": 19500
    },
    {
      "epoch": 5.278437582475587,
      "grad_norm": 3.612468957901001,
      "learning_rate": 5.023023360287511e-06,
      "loss": 0.0848,
      "step": 20000
    },
    {
      "epoch": 5.410398522037477,
      "grad_norm": 0.16665929555892944,
      "learning_rate": 4.88263701707098e-06,
      "loss": 0.0856,
      "step": 20500
    },
    {
      "epoch": 5.542359461599366,
      "grad_norm": 14.307438850402832,
      "learning_rate": 4.7422506738544475e-06,
      "loss": 0.0919,
      "step": 21000
    },
    {
      "epoch": 5.674320401161256,
      "grad_norm": 81.50785827636719,
      "learning_rate": 4.601864330637916e-06,
      "loss": 0.091,
      "step": 21500
    },
    {
      "epoch": 5.806281340723146,
      "grad_norm": 0.545400857925415,
      "learning_rate": 4.461477987421384e-06,
      "loss": 0.0966,
      "step": 22000
    },
    {
      "epoch": 5.938242280285036,
      "grad_norm": 0.13066865503787994,
      "learning_rate": 4.321091644204852e-06,
      "loss": 0.0921,
      "step": 22500
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9498144023756496,
      "eval_loss": 0.26830601692199707,
      "eval_runtime": 3.2279,
      "eval_samples_per_second": 2086.527,
      "eval_steps_per_second": 130.427,
      "step": 22734
    },
    {
      "epoch": 6.070203219846925,
      "grad_norm": 0.016320781782269478,
      "learning_rate": 4.18070530098832e-06,
      "loss": 0.0786,
      "step": 23000
    },
    {
      "epoch": 6.202164159408815,
      "grad_norm": 1.0383734703063965,
      "learning_rate": 4.040318957771789e-06,
      "loss": 0.0598,
      "step": 23500
    },
    {
      "epoch": 6.334125098970705,
      "grad_norm": 37.35941696166992,
      "learning_rate": 3.899932614555256e-06,
      "loss": 0.0741,
      "step": 24000
    },
    {
      "epoch": 6.466086038532595,
      "grad_norm": 0.22372713685035706,
      "learning_rate": 3.7595462713387242e-06,
      "loss": 0.0648,
      "step": 24500
    },
    {
      "epoch": 6.598046978094484,
      "grad_norm": 0.3024131953716278,
      "learning_rate": 3.6191599281221925e-06,
      "loss": 0.0742,
      "step": 25000
    },
    {
      "epoch": 6.730007917656374,
      "grad_norm": 0.5557472109794617,
      "learning_rate": 3.4787735849056607e-06,
      "loss": 0.0697,
      "step": 25500
    },
    {
      "epoch": 6.8619688572182636,
      "grad_norm": 10.842752456665039,
      "learning_rate": 3.338387241689129e-06,
      "loss": 0.0781,
      "step": 26000
    },
    {
      "epoch": 6.993929796780153,
      "grad_norm": 0.058014508336782455,
      "learning_rate": 3.1980008984725967e-06,
      "loss": 0.0749,
      "step": 26500
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9480326651818857,
      "eval_loss": 0.2953985631465912,
      "eval_runtime": 3.1998,
      "eval_samples_per_second": 2104.852,
      "eval_steps_per_second": 131.573,
      "step": 26523
    },
    {
      "epoch": 7.1258907363420425,
      "grad_norm": 0.021114373579621315,
      "learning_rate": 3.057614555256065e-06,
      "loss": 0.0606,
      "step": 27000
    },
    {
      "epoch": 7.257851675903932,
      "grad_norm": 0.09994231164455414,
      "learning_rate": 2.9172282120395328e-06,
      "loss": 0.0513,
      "step": 27500
    },
    {
      "epoch": 7.389812615465822,
      "grad_norm": 0.02594364993274212,
      "learning_rate": 2.776841868823001e-06,
      "loss": 0.0578,
      "step": 28000
    },
    {
      "epoch": 7.521773555027712,
      "grad_norm": 0.3120315968990326,
      "learning_rate": 2.6364555256064692e-06,
      "loss": 0.0622,
      "step": 28500
    },
    {
      "epoch": 7.653734494589601,
      "grad_norm": 0.04183578863739967,
      "learning_rate": 2.496069182389937e-06,
      "loss": 0.0644,
      "step": 29000
    },
    {
      "epoch": 7.785695434151491,
      "grad_norm": 3.4326181411743164,
      "learning_rate": 2.3556828391734053e-06,
      "loss": 0.0583,
      "step": 29500
    },
    {
      "epoch": 7.917656373713381,
      "grad_norm": 0.20767995715141296,
      "learning_rate": 2.2152964959568735e-06,
      "loss": 0.0625,
      "step": 30000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9484780994803267,
      "eval_loss": 0.343423455953598,
      "eval_runtime": 3.2166,
      "eval_samples_per_second": 2093.807,
      "eval_steps_per_second": 130.882,
      "step": 30312
    },
    {
      "epoch": 8.04961731327527,
      "grad_norm": 0.006022294517606497,
      "learning_rate": 2.0749101527403413e-06,
      "loss": 0.0569,
      "step": 30500
    },
    {
      "epoch": 8.181578252837161,
      "grad_norm": 2.1597390174865723,
      "learning_rate": 1.9345238095238096e-06,
      "loss": 0.044,
      "step": 31000
    },
    {
      "epoch": 8.31353919239905,
      "grad_norm": 0.019608333706855774,
      "learning_rate": 1.7941374663072778e-06,
      "loss": 0.0509,
      "step": 31500
    },
    {
      "epoch": 8.445500131960939,
      "grad_norm": 0.008767606690526009,
      "learning_rate": 1.6537511230907458e-06,
      "loss": 0.0437,
      "step": 32000
    },
    {
      "epoch": 8.57746107152283,
      "grad_norm": 0.47362828254699707,
      "learning_rate": 1.513364779874214e-06,
      "loss": 0.0505,
      "step": 32500
    },
    {
      "epoch": 8.709422011084719,
      "grad_norm": 0.08651397377252579,
      "learning_rate": 1.372978436657682e-06,
      "loss": 0.0571,
      "step": 33000
    },
    {
      "epoch": 8.841382950646608,
      "grad_norm": 0.09942079335451126,
      "learning_rate": 1.2325920934411503e-06,
      "loss": 0.0466,
      "step": 33500
    },
    {
      "epoch": 8.973343890208499,
      "grad_norm": 33.968544006347656,
      "learning_rate": 1.092205750224618e-06,
      "loss": 0.048,
      "step": 34000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.948626577579807,
      "eval_loss": 0.3509092926979065,
      "eval_runtime": 3.1604,
      "eval_samples_per_second": 2131.066,
      "eval_steps_per_second": 133.211,
      "step": 34101
    },
    {
      "epoch": 9.105304829770388,
      "grad_norm": 0.12073567509651184,
      "learning_rate": 9.518194070080863e-07,
      "loss": 0.0404,
      "step": 34500
    },
    {
      "epoch": 9.237265769332277,
      "grad_norm": 0.8697101473808289,
      "learning_rate": 8.114330637915545e-07,
      "loss": 0.0445,
      "step": 35000
    },
    {
      "epoch": 9.369226708894168,
      "grad_norm": 0.02095695585012436,
      "learning_rate": 6.710467205750225e-07,
      "loss": 0.043,
      "step": 35500
    },
    {
      "epoch": 9.501187648456057,
      "grad_norm": 0.010166825726628304,
      "learning_rate": 5.306603773584906e-07,
      "loss": 0.0441,
      "step": 36000
    },
    {
      "epoch": 9.633148588017947,
      "grad_norm": 0.10797163844108582,
      "learning_rate": 3.902740341419587e-07,
      "loss": 0.0388,
      "step": 36500
    },
    {
      "epoch": 9.765109527579837,
      "grad_norm": 0.16056068241596222,
      "learning_rate": 2.4988769092542675e-07,
      "loss": 0.034,
      "step": 37000
    },
    {
      "epoch": 9.897070467141726,
      "grad_norm": 0.35128360986709595,
      "learning_rate": 1.095013477088949e-07,
      "loss": 0.0414,
      "step": 37500
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9499628804751299,
      "eval_loss": 0.3629242777824402,
      "eval_runtime": 3.1958,
      "eval_samples_per_second": 2107.443,
      "eval_steps_per_second": 131.735,
      "step": 37890
    },
    {
      "epoch": 10.0,
      "step": 37890,
      "total_flos": 1.114824255444396e+16,
      "train_loss": 0.12249788010960426,
      "train_runtime": 1979.2422,
      "train_samples_per_second": 306.249,
      "train_steps_per_second": 19.144
    }
  ],
  "logging_steps": 500,
  "max_steps": 37890,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.114824255444396e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}