{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 0.011836794283129537,
      "learning_rate": 5.333333333333334e-06,
      "loss": 0.1784,
      "step": 1
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.013144169911195151,
      "learning_rate": 1.0666666666666667e-05,
      "loss": 0.183,
      "step": 2
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.0121269303983867,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.1892,
      "step": 3
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.012991239812108061,
      "learning_rate": 2.1333333333333335e-05,
      "loss": 0.1924,
      "step": 4
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.014210684336703932,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.1851,
      "step": 5
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.014497661416210644,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.1858,
      "step": 6
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.01501360143801389,
      "learning_rate": 3.733333333333334e-05,
      "loss": 0.1786,
      "step": 7
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.01545934778539957,
      "learning_rate": 4.266666666666667e-05,
      "loss": 0.1778,
      "step": 8
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.017592772153285995,
      "learning_rate": 4.8e-05,
      "loss": 0.1828,
      "step": 9
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.01918684683158518,
      "learning_rate": 5.333333333333333e-05,
      "loss": 0.1783,
      "step": 10
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.02070618658939026,
      "learning_rate": 5.8666666666666665e-05,
      "loss": 0.1677,
      "step": 11
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.020311054637052683,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.1677,
      "step": 12
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.017183460099699004,
      "learning_rate": 6.933333333333334e-05,
      "loss": 0.166,
      "step": 13
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.021310100264076974,
      "learning_rate": 7.466666666666667e-05,
      "loss": 0.1553,
      "step": 14
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.021818518291701307,
      "learning_rate": 8e-05,
      "loss": 0.1567,
      "step": 15
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.018902157014422046,
      "learning_rate": 7.998916964717848e-05,
      "loss": 0.1408,
      "step": 16
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.015623312164157991,
      "learning_rate": 7.9956684453541e-05,
      "loss": 0.1545,
      "step": 17
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.01808723881045501,
      "learning_rate": 7.990256201039297e-05,
      "loss": 0.1372,
      "step": 18
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.01916171140544312,
      "learning_rate": 7.982683162599218e-05,
      "loss": 0.1517,
      "step": 19
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.017730305482550706,
      "learning_rate": 7.972953430967773e-05,
      "loss": 0.1326,
      "step": 20
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.015762369106450024,
      "learning_rate": 7.961072274966282e-05,
      "loss": 0.1197,
      "step": 21
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.014292327322379344,
      "learning_rate": 7.947046128450319e-05,
      "loss": 0.1152,
      "step": 22
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.01437707355914928,
      "learning_rate": 7.930882586825653e-05,
      "loss": 0.1264,
      "step": 23
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.012970610862858245,
      "learning_rate": 7.912590402935223e-05,
      "loss": 0.1102,
      "step": 24
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.012172702786992022,
      "learning_rate": 7.892179482319297e-05,
      "loss": 0.1077,
      "step": 25
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.012798080837211678,
      "learning_rate": 7.869660877851456e-05,
      "loss": 0.1056,
      "step": 26
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.012379439835100994,
      "learning_rate": 7.845046783753276e-05,
      "loss": 0.0935,
      "step": 27
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.012484193428510048,
      "learning_rate": 7.818350528990929e-05,
      "loss": 0.0967,
      "step": 28
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.013072802773236348,
      "learning_rate": 7.789586570057317e-05,
      "loss": 0.0904,
      "step": 29
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.012612278116937011,
      "learning_rate": 7.758770483143634e-05,
      "loss": 0.0759,
      "step": 30
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.011039928859156514,
      "learning_rate": 7.72591895570457e-05,
      "loss": 0.0769,
      "step": 31
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.010319234755387824,
      "learning_rate": 7.69104977742177e-05,
      "loss": 0.0881,
      "step": 32
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.012109271833835127,
      "learning_rate": 7.654181830570404e-05,
      "loss": 0.0775,
      "step": 33
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.009586595074910477,
      "learning_rate": 7.615335079794083e-05,
      "loss": 0.0835,
      "step": 34
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.010025040945538152,
      "learning_rate": 7.57453056129365e-05,
      "loss": 0.066,
      "step": 35
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.011787300617647403,
      "learning_rate": 7.531790371435709e-05,
      "loss": 0.0803,
      "step": 36
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.009810688082093844,
      "learning_rate": 7.48713765478705e-05,
      "loss": 0.0644,
      "step": 37
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.010733020871554734,
      "learning_rate": 7.440596591581463e-05,
      "loss": 0.0628,
      "step": 38
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.010655707774709684,
      "learning_rate": 7.392192384625704e-05,
      "loss": 0.0611,
      "step": 39
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.009510943024388718,
      "learning_rate": 7.341951245651747e-05,
      "loss": 0.065,
      "step": 40
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.010173693871175086,
      "learning_rate": 7.28990038112265e-05,
      "loss": 0.066,
      "step": 41
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.010747361142782984,
      "learning_rate": 7.236067977499791e-05,
      "loss": 0.062,
      "step": 42
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.010882093858595883,
      "learning_rate": 7.180483185979392e-05,
      "loss": 0.0665,
      "step": 43
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.012512937845818453,
      "learning_rate": 7.123176106706638e-05,
      "loss": 0.0784,
      "step": 44
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.010128941659929386,
      "learning_rate": 7.064177772475912e-05,
      "loss": 0.0572,
      "step": 45
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.013042607607054833,
      "learning_rate": 7.003520131925997e-05,
      "loss": 0.0492,
      "step": 46
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.01124029105308844,
      "learning_rate": 6.941236032239316e-05,
      "loss": 0.0645,
      "step": 47
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.010498920210130373,
      "learning_rate": 6.877359201354606e-05,
      "loss": 0.0529,
      "step": 48
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.011375917727348396,
      "learning_rate": 6.811924229702648e-05,
      "loss": 0.0533,
      "step": 49
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.013077996119079647,
      "learning_rate": 6.744966551474936e-05,
      "loss": 0.0496,
      "step": 50
    },
    {
      "epoch": 1.02,
      "grad_norm": 0.01198480727920829,
      "learning_rate": 6.676522425435433e-05,
      "loss": 0.055,
      "step": 51
    },
    {
      "epoch": 1.04,
      "grad_norm": 0.012120833249446987,
      "learning_rate": 6.606628915285822e-05,
      "loss": 0.0454,
      "step": 52
    },
    {
      "epoch": 1.06,
      "grad_norm": 0.010976794460207764,
      "learning_rate": 6.53532386959484e-05,
      "loss": 0.0458,
      "step": 53
    },
    {
      "epoch": 1.08,
      "grad_norm": 0.009461493270210048,
      "learning_rate": 6.462645901302633e-05,
      "loss": 0.0399,
      "step": 54
    },
    {
      "epoch": 1.1,
      "grad_norm": 0.009141191578587225,
      "learning_rate": 6.388634366811146e-05,
      "loss": 0.0481,
      "step": 55
    },
    {
      "epoch": 1.12,
      "grad_norm": 0.012268190521641324,
      "learning_rate": 6.313329344671946e-05,
      "loss": 0.0516,
      "step": 56
    },
    {
      "epoch": 1.1400000000000001,
      "grad_norm": 0.012173840413446102,
      "learning_rate": 6.236771613882987e-05,
      "loss": 0.0547,
      "step": 57
    },
    {
      "epoch": 1.16,
      "grad_norm": 0.011339250919817995,
      "learning_rate": 6.159002631806052e-05,
      "loss": 0.0443,
      "step": 58
    },
    {
      "epoch": 1.18,
      "grad_norm": 0.009875357947909695,
      "learning_rate": 6.0800645117168616e-05,
      "loss": 0.0407,
      "step": 59
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.009050582564290621,
      "learning_rate": 6.000000000000001e-05,
      "loss": 0.0366,
      "step": 60
    },
    {
      "epoch": 1.22,
      "grad_norm": 0.01199662586737057,
      "learning_rate": 5.918852453000986e-05,
      "loss": 0.0516,
      "step": 61
    },
    {
      "epoch": 1.24,
      "grad_norm": 0.009777166655536148,
      "learning_rate": 5.836665813548047e-05,
      "loss": 0.0366,
      "step": 62
    },
    {
      "epoch": 1.26,
      "grad_norm": 0.009680688543701936,
      "learning_rate": 5.75348458715631e-05,
      "loss": 0.05,
      "step": 63
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.010134572008227292,
      "learning_rate": 5.669353817927272e-05,
      "loss": 0.0487,
      "step": 64
    },
    {
      "epoch": 1.3,
      "grad_norm": 0.008083377755641951,
      "learning_rate": 5.584319064156628e-05,
      "loss": 0.042,
      "step": 65
    },
    {
      "epoch": 1.32,
      "grad_norm": 0.008575428314508896,
      "learning_rate": 5.4984263736636494e-05,
      "loss": 0.0439,
      "step": 66
    },
    {
      "epoch": 1.34,
      "grad_norm": 0.007817885855081317,
      "learning_rate": 5.4117222588554756e-05,
      "loss": 0.0518,
      "step": 67
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 0.009674626271834803,
      "learning_rate": 5.324253671539833e-05,
      "loss": 0.0507,
      "step": 68
    },
    {
      "epoch": 1.38,
      "grad_norm": 0.009225628045924186,
      "learning_rate": 5.23606797749979e-05,
      "loss": 0.0463,
      "step": 69
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.007217961376662902,
      "learning_rate": 5.1472129308443616e-05,
      "loss": 0.0385,
      "step": 70
    },
    {
      "epoch": 1.42,
      "grad_norm": 0.0099798783808331,
      "learning_rate": 5.05773664814881e-05,
      "loss": 0.041,
      "step": 71
    },
    {
      "epoch": 1.44,
      "grad_norm": 0.00789103284123039,
      "learning_rate": 4.967687582398671e-05,
      "loss": 0.0438,
      "step": 72
    },
    {
      "epoch": 1.46,
      "grad_norm": 0.008448213641863288,
      "learning_rate": 4.877114496751613e-05,
      "loss": 0.0448,
      "step": 73
    },
    {
      "epoch": 1.48,
      "grad_norm": 0.0077635722344695,
      "learning_rate": 4.786066438131321e-05,
      "loss": 0.0432,
      "step": 74
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.008728997096493834,
      "learning_rate": 4.694592710667723e-05,
      "loss": 0.0432,
      "step": 75
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.008621728116344941,
      "learning_rate": 4.602742848997933e-05,
      "loss": 0.0367,
      "step": 76
    },
    {
      "epoch": 1.54,
      "grad_norm": 0.008119565893153342,
      "learning_rate": 4.51056659144238e-05,
      "loss": 0.0565,
      "step": 77
    },
    {
      "epoch": 1.56,
      "grad_norm": 0.008502699325342705,
      "learning_rate": 4.418113853070614e-05,
      "loss": 0.0435,
      "step": 78
    },
    {
      "epoch": 1.58,
      "grad_norm": 0.007186727244466837,
      "learning_rate": 4.3254346986714334e-05,
      "loss": 0.0448,
      "step": 79
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.006984309685806434,
      "learning_rate": 4.2325793156419035e-05,
      "loss": 0.0449,
      "step": 80
    },
    {
      "epoch": 1.62,
      "grad_norm": 0.007613270974160622,
      "learning_rate": 4.139597986810005e-05,
      "loss": 0.04,
      "step": 81
    },
    {
      "epoch": 1.6400000000000001,
      "grad_norm": 0.008747731426528158,
      "learning_rate": 4.046541063205589e-05,
      "loss": 0.0477,
      "step": 82
    },
    {
      "epoch": 1.6600000000000001,
      "grad_norm": 0.007713687203062809,
      "learning_rate": 3.953458936794413e-05,
      "loss": 0.0361,
      "step": 83
    },
    {
      "epoch": 1.6800000000000002,
      "grad_norm": 0.007469803525700852,
      "learning_rate": 3.860402013189998e-05,
      "loss": 0.0398,
      "step": 84
    },
    {
      "epoch": 1.7,
      "grad_norm": 0.008054987299454733,
      "learning_rate": 3.767420684358097e-05,
      "loss": 0.0374,
      "step": 85
    },
    {
      "epoch": 1.72,
      "grad_norm": 0.0069550147082264755,
      "learning_rate": 3.674565301328568e-05,
      "loss": 0.0375,
      "step": 86
    },
    {
      "epoch": 1.74,
      "grad_norm": 0.008574658064169316,
      "learning_rate": 3.581886146929387e-05,
      "loss": 0.0344,
      "step": 87
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.008065675246990847,
      "learning_rate": 3.4894334085576215e-05,
      "loss": 0.0503,
      "step": 88
    },
    {
      "epoch": 1.78,
      "grad_norm": 0.008449200255129663,
      "learning_rate": 3.397257151002068e-05,
      "loss": 0.0497,
      "step": 89
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.008214620799597373,
      "learning_rate": 3.305407289332279e-05,
      "loss": 0.0431,
      "step": 90
    },
    {
      "epoch": 1.8199999999999998,
      "grad_norm": 0.008181828575926744,
      "learning_rate": 3.213933561868679e-05,
      "loss": 0.0407,
      "step": 91
    },
    {
      "epoch": 1.8399999999999999,
      "grad_norm": 0.0075980985532412984,
      "learning_rate": 3.122885503248386e-05,
      "loss": 0.0384,
      "step": 92
    },
    {
      "epoch": 1.8599999999999999,
      "grad_norm": 0.007753220913052338,
      "learning_rate": 3.0323124176013297e-05,
      "loss": 0.0339,
      "step": 93
    },
    {
      "epoch": 1.88,
      "grad_norm": 0.006840530142098034,
      "learning_rate": 2.9422633518511926e-05,
      "loss": 0.0481,
      "step": 94
    },
    {
      "epoch": 1.9,
      "grad_norm": 0.006332360472268186,
      "learning_rate": 2.8527870691556404e-05,
      "loss": 0.0339,
      "step": 95
    },
    {
      "epoch": 1.92,
      "grad_norm": 0.007235145781102665,
      "learning_rate": 2.7639320225002108e-05,
      "loss": 0.0397,
      "step": 96
    },
    {
      "epoch": 1.94,
      "grad_norm": 0.007012760963826127,
      "learning_rate": 2.6757463284601682e-05,
      "loss": 0.0393,
      "step": 97
    },
    {
      "epoch": 1.96,
      "grad_norm": 0.007270176750961782,
      "learning_rate": 2.5882777411445254e-05,
      "loss": 0.0415,
      "step": 98
    },
    {
      "epoch": 1.98,
      "grad_norm": 0.007105912194783928,
      "learning_rate": 2.501573626336352e-05,
      "loss": 0.0409,
      "step": 99
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.0075041296466497555,
      "learning_rate": 2.4156809358433728e-05,
      "loss": 0.0476,
      "step": 100
    },
    {
      "epoch": 2.02,
      "grad_norm": 0.007229987336226452,
      "learning_rate": 2.330646182072729e-05,
      "loss": 0.0369,
      "step": 101
    },
    {
      "epoch": 2.04,
      "grad_norm": 0.00725928334005231,
      "learning_rate": 2.24651541284369e-05,
      "loss": 0.0519,
      "step": 102
    },
    {
      "epoch": 2.06,
      "grad_norm": 0.007680057407335098,
      "learning_rate": 2.1633341864519526e-05,
      "loss": 0.0432,
      "step": 103
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.0065582180188280504,
      "learning_rate": 2.0811475469990167e-05,
      "loss": 0.0414,
      "step": 104
    },
    {
      "epoch": 2.1,
      "grad_norm": 0.006539240271346896,
      "learning_rate": 2.0000000000000012e-05,
      "loss": 0.0457,
      "step": 105
    },
    {
      "epoch": 2.12,
      "grad_norm": 0.006512737641412582,
      "learning_rate": 1.9199354882831387e-05,
      "loss": 0.0325,
      "step": 106
    },
    {
      "epoch": 2.14,
      "grad_norm": 0.007145394245016067,
      "learning_rate": 1.8409973681939498e-05,
      "loss": 0.0374,
      "step": 107
    },
    {
      "epoch": 2.16,
      "grad_norm": 0.006813685747154185,
      "learning_rate": 1.7632283861170135e-05,
      "loss": 0.0346,
      "step": 108
    },
    {
      "epoch": 2.18,
      "grad_norm": 0.006675567706173568,
      "learning_rate": 1.686670655328054e-05,
      "loss": 0.0412,
      "step": 109
    },
    {
      "epoch": 2.2,
      "grad_norm": 0.007655830729660499,
      "learning_rate": 1.6113656331888563e-05,
      "loss": 0.0393,
      "step": 110
    },
    {
      "epoch": 2.22,
      "grad_norm": 0.0067588437862823765,
      "learning_rate": 1.537354098697367e-05,
      "loss": 0.0408,
      "step": 111
    },
    {
      "epoch": 2.24,
      "grad_norm": 0.006829173089207945,
      "learning_rate": 1.4646761304051587e-05,
      "loss": 0.0462,
      "step": 112
    },
    {
      "epoch": 2.26,
      "grad_norm": 0.00776530999834305,
      "learning_rate": 1.3933710847141795e-05,
      "loss": 0.0468,
      "step": 113
    },
    {
      "epoch": 2.2800000000000002,
      "grad_norm": 0.007504157880790311,
      "learning_rate": 1.3234775745645684e-05,
      "loss": 0.0321,
      "step": 114
    },
    {
      "epoch": 2.3,
      "grad_norm": 0.007952302277823793,
      "learning_rate": 1.2550334485250661e-05,
      "loss": 0.0385,
      "step": 115
    },
    {
      "epoch": 2.32,
      "grad_norm": 0.006915155236149041,
      "learning_rate": 1.1880757702973531e-05,
      "loss": 0.0347,
      "step": 116
    },
    {
      "epoch": 2.34,
      "grad_norm": 0.007020779505852696,
      "learning_rate": 1.1226407986453963e-05,
      "loss": 0.0342,
      "step": 117
    },
    {
      "epoch": 2.36,
      "grad_norm": 0.007151568178165054,
      "learning_rate": 1.0587639677606857e-05,
      "loss": 0.037,
      "step": 118
    },
    {
      "epoch": 2.38,
      "grad_norm": 0.006821852843524058,
      "learning_rate": 9.964798680740033e-06,
      "loss": 0.0354,
      "step": 119
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.007044562968171477,
      "learning_rate": 9.358222275240884e-06,
      "loss": 0.0527,
      "step": 120
    },
    {
      "epoch": 2.42,
      "grad_norm": 0.006561647136472767,
      "learning_rate": 8.768238932933632e-06,
      "loss": 0.0325,
      "step": 121
    },
    {
      "epoch": 2.44,
      "grad_norm": 0.0067862182517935375,
      "learning_rate": 8.195168140206084e-06,
      "loss": 0.0392,
      "step": 122
    },
    {
      "epoch": 2.46,
      "grad_norm": 0.007679758074906389,
      "learning_rate": 7.639320225002106e-06,
      "loss": 0.0366,
      "step": 123
    },
    {
      "epoch": 2.48,
      "grad_norm": 0.007059203376016052,
      "learning_rate": 7.1009961887735075e-06,
      "loss": 0.0451,
      "step": 124
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.0068122235401948195,
      "learning_rate": 6.58048754348255e-06,
      "loss": 0.0466,
      "step": 125
    },
    {
      "epoch": 2.52,
      "grad_norm": 0.006459633695720914,
      "learning_rate": 6.078076153742962e-06,
      "loss": 0.0438,
      "step": 126
    },
    {
      "epoch": 2.54,
      "grad_norm": 0.0066282359320979705,
      "learning_rate": 5.5940340841853915e-06,
      "loss": 0.0345,
      "step": 127
    },
    {
      "epoch": 2.56,
      "grad_norm": 0.007692912237975653,
      "learning_rate": 5.128623452129508e-06,
      "loss": 0.0397,
      "step": 128
    },
    {
      "epoch": 2.58,
      "grad_norm": 0.006824212526305754,
      "learning_rate": 4.6820962856429205e-06,
      "loss": 0.0347,
      "step": 129
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.007239649090287155,
      "learning_rate": 4.254694387063514e-06,
      "loss": 0.0333,
      "step": 130
    },
    {
      "epoch": 2.62,
      "grad_norm": 0.00793644072068399,
      "learning_rate": 3.846649202059181e-06,
      "loss": 0.0412,
      "step": 131
    },
    {
      "epoch": 2.64,
      "grad_norm": 0.007215797336986897,
      "learning_rate": 3.458181694295961e-06,
      "loss": 0.0325,
      "step": 132
    },
    {
      "epoch": 2.66,
      "grad_norm": 0.008398503063860334,
      "learning_rate": 3.0895022257823083e-06,
      "loss": 0.0396,
      "step": 133
    },
    {
      "epoch": 2.68,
      "grad_norm": 0.00708248023260857,
      "learning_rate": 2.7408104429543025e-06,
      "loss": 0.0363,
      "step": 134
    },
    {
      "epoch": 2.7,
      "grad_norm": 0.007119394542491576,
      "learning_rate": 2.4122951685636674e-06,
      "loss": 0.0346,
      "step": 135
    },
    {
      "epoch": 2.7199999999999998,
      "grad_norm": 0.007078140341139728,
      "learning_rate": 2.104134299426832e-06,
      "loss": 0.0348,
      "step": 136
    },
    {
      "epoch": 2.74,
      "grad_norm": 0.006784084855431164,
      "learning_rate": 1.8164947100907238e-06,
      "loss": 0.0422,
      "step": 137
    },
    {
      "epoch": 2.76,
      "grad_norm": 0.00769139233532723,
      "learning_rate": 1.5495321624672443e-06,
      "loss": 0.0505,
      "step": 138
    },
    {
      "epoch": 2.7800000000000002,
      "grad_norm": 0.00647599020300139,
      "learning_rate": 1.3033912214854482e-06,
      "loss": 0.0486,
      "step": 139
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.007093377884829064,
      "learning_rate": 1.0782051768070477e-06,
      "loss": 0.0369,
      "step": 140
    },
    {
      "epoch": 2.82,
      "grad_norm": 0.00691704950619504,
      "learning_rate": 8.740959706477725e-07,
      "loss": 0.04,
      "step": 141
    },
    {
      "epoch": 2.84,
      "grad_norm": 0.006718766519716148,
      "learning_rate": 6.911741317434706e-07,
      "loss": 0.0438,
      "step": 142
    },
    {
      "epoch": 2.86,
      "grad_norm": 0.0064257013556098756,
      "learning_rate": 5.295387154968312e-07,
      "loss": 0.0338,
      "step": 143
    },
    {
      "epoch": 2.88,
      "grad_norm": 0.006571050709996742,
      "learning_rate": 3.8927725033718553e-07,
      "loss": 0.0341,
      "step": 144
    },
    {
      "epoch": 2.9,
      "grad_norm": 0.007149178655125779,
      "learning_rate": 2.704656903222791e-07,
      "loss": 0.0345,
      "step": 145
    },
    {
      "epoch": 2.92,
      "grad_norm": 0.007436951516971702,
      "learning_rate": 1.7316837400782604e-07,
      "loss": 0.0342,
      "step": 146
    },
    {
      "epoch": 2.94,
      "grad_norm": 0.008394531157711207,
      "learning_rate": 9.74379896070321e-08,
      "loss": 0.047,
      "step": 147
    },
    {
      "epoch": 2.96,
      "grad_norm": 0.0072046735629130165,
      "learning_rate": 4.331554645901737e-08,
      "loss": 0.0404,
      "step": 148
    },
    {
      "epoch": 2.98,
      "grad_norm": 0.006777657382950411,
      "learning_rate": 1.0830352821531442e-08,
      "loss": 0.0376,
      "step": 149
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.007529332702697721,
      "learning_rate": 0.0,
      "loss": 0.039,
      "step": 150
    },
    {
      "epoch": 3.0,
      "step": 150,
      "total_flos": 109013455077376.0,
      "train_loss": 0.06588543598850569,
      "train_runtime": 1227.8596,
      "train_samples_per_second": 0.977,
      "train_steps_per_second": 0.122
    }
  ],
  "logging_steps": 1,
  "max_steps": 150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 109013455077376.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}