{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 184,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010869565217391304,
      "grad_norm": 0.6413319706916809,
      "learning_rate": 0.0,
      "loss": 0.2266,
      "step": 1
    },
    {
      "epoch": 0.021739130434782608,
      "grad_norm": 0.7888997197151184,
      "learning_rate": 6e-06,
      "loss": 0.3224,
      "step": 2
    },
    {
      "epoch": 0.03260869565217391,
      "grad_norm": 1.0116347074508667,
      "learning_rate": 1.2e-05,
      "loss": 0.1723,
      "step": 3
    },
    {
      "epoch": 0.043478260869565216,
      "grad_norm": 0.22849714756011963,
      "learning_rate": 1.8e-05,
      "loss": 0.1144,
      "step": 4
    },
    {
      "epoch": 0.05434782608695652,
      "grad_norm": 0.4945598244667053,
      "learning_rate": 2.4e-05,
      "loss": 0.2002,
      "step": 5
    },
    {
      "epoch": 0.06521739130434782,
      "grad_norm": 0.35463154315948486,
      "learning_rate": 3e-05,
      "loss": 0.1464,
      "step": 6
    },
    {
      "epoch": 0.07608695652173914,
      "grad_norm": 0.29865413904190063,
      "learning_rate": 2.983240223463687e-05,
      "loss": 0.1837,
      "step": 7
    },
    {
      "epoch": 0.08695652173913043,
      "grad_norm": 0.5177946090698242,
      "learning_rate": 2.9664804469273744e-05,
      "loss": 0.3532,
      "step": 8
    },
    {
      "epoch": 0.09782608695652174,
      "grad_norm": 0.5293675661087036,
      "learning_rate": 2.9497206703910613e-05,
      "loss": 0.295,
      "step": 9
    },
    {
      "epoch": 0.10869565217391304,
      "grad_norm": 0.4006047248840332,
      "learning_rate": 2.9329608938547488e-05,
      "loss": 0.2877,
      "step": 10
    },
    {
      "epoch": 0.11956521739130435,
      "grad_norm": 0.42227375507354736,
      "learning_rate": 2.9162011173184356e-05,
      "loss": 0.306,
      "step": 11
    },
    {
      "epoch": 0.13043478260869565,
      "grad_norm": 0.5574392676353455,
      "learning_rate": 2.899441340782123e-05,
      "loss": 0.2168,
      "step": 12
    },
    {
      "epoch": 0.14130434782608695,
      "grad_norm": 0.5573036074638367,
      "learning_rate": 2.88268156424581e-05,
      "loss": 0.3029,
      "step": 13
    },
    {
      "epoch": 0.15217391304347827,
      "grad_norm": 0.578084409236908,
      "learning_rate": 2.8659217877094975e-05,
      "loss": 0.2931,
      "step": 14
    },
    {
      "epoch": 0.16304347826086957,
      "grad_norm": 0.35756808519363403,
      "learning_rate": 2.8491620111731843e-05,
      "loss": 0.1996,
      "step": 15
    },
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 0.7275254130363464,
      "learning_rate": 2.8324022346368715e-05,
      "loss": 0.2566,
      "step": 16
    },
    {
      "epoch": 0.18478260869565216,
      "grad_norm": 0.3866647481918335,
      "learning_rate": 2.8156424581005587e-05,
      "loss": 0.2229,
      "step": 17
    },
    {
      "epoch": 0.1956521739130435,
      "grad_norm": 0.5137191414833069,
      "learning_rate": 2.798882681564246e-05,
      "loss": 0.2982,
      "step": 18
    },
    {
      "epoch": 0.20652173913043478,
      "grad_norm": 0.46941468119621277,
      "learning_rate": 2.782122905027933e-05,
      "loss": 0.3353,
      "step": 19
    },
    {
      "epoch": 0.21739130434782608,
      "grad_norm": 0.3693690896034241,
      "learning_rate": 2.7653631284916202e-05,
      "loss": 0.2472,
      "step": 20
    },
    {
      "epoch": 0.22826086956521738,
      "grad_norm": 0.5384896397590637,
      "learning_rate": 2.7486033519553074e-05,
      "loss": 0.315,
      "step": 21
    },
    {
      "epoch": 0.2391304347826087,
      "grad_norm": 0.4074620306491852,
      "learning_rate": 2.7318435754189945e-05,
      "loss": 0.2249,
      "step": 22
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.3575456440448761,
      "learning_rate": 2.7150837988826817e-05,
      "loss": 0.2042,
      "step": 23
    },
    {
      "epoch": 0.2608695652173913,
      "grad_norm": 0.4679282605648041,
      "learning_rate": 2.698324022346369e-05,
      "loss": 0.2471,
      "step": 24
    },
    {
      "epoch": 0.2717391304347826,
      "grad_norm": 0.3495662212371826,
      "learning_rate": 2.6815642458100557e-05,
      "loss": 0.1561,
      "step": 25
    },
    {
      "epoch": 0.2826086956521739,
      "grad_norm": 0.37626883387565613,
      "learning_rate": 2.6648044692737432e-05,
      "loss": 0.1931,
      "step": 26
    },
    {
      "epoch": 0.29347826086956524,
      "grad_norm": 0.3739338219165802,
      "learning_rate": 2.64804469273743e-05,
      "loss": 0.2017,
      "step": 27
    },
    {
      "epoch": 0.30434782608695654,
      "grad_norm": 0.4975775182247162,
      "learning_rate": 2.6312849162011176e-05,
      "loss": 0.2102,
      "step": 28
    },
    {
      "epoch": 0.31521739130434784,
      "grad_norm": 0.38530030846595764,
      "learning_rate": 2.6145251396648044e-05,
      "loss": 0.2399,
      "step": 29
    },
    {
      "epoch": 0.32608695652173914,
      "grad_norm": 0.5597913861274719,
      "learning_rate": 2.597765363128492e-05,
      "loss": 0.2611,
      "step": 30
    },
    {
      "epoch": 0.33695652173913043,
      "grad_norm": 0.5215259790420532,
      "learning_rate": 2.5810055865921788e-05,
      "loss": 0.3184,
      "step": 31
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 0.3913455307483673,
      "learning_rate": 2.564245810055866e-05,
      "loss": 0.2889,
      "step": 32
    },
    {
      "epoch": 0.358695652173913,
      "grad_norm": 0.32094502449035645,
      "learning_rate": 2.547486033519553e-05,
      "loss": 0.2278,
      "step": 33
    },
    {
      "epoch": 0.3695652173913043,
      "grad_norm": 0.3086216151714325,
      "learning_rate": 2.5307262569832403e-05,
      "loss": 0.1388,
      "step": 34
    },
    {
      "epoch": 0.3804347826086957,
      "grad_norm": 0.42003577947616577,
      "learning_rate": 2.5139664804469275e-05,
      "loss": 0.1996,
      "step": 35
    },
    {
      "epoch": 0.391304347826087,
      "grad_norm": 0.3483677804470062,
      "learning_rate": 2.4972067039106143e-05,
      "loss": 0.1602,
      "step": 36
    },
    {
      "epoch": 0.40217391304347827,
      "grad_norm": 0.44446852803230286,
      "learning_rate": 2.4804469273743018e-05,
      "loss": 0.3283,
      "step": 37
    },
    {
      "epoch": 0.41304347826086957,
      "grad_norm": 0.570021390914917,
      "learning_rate": 2.4636871508379887e-05,
      "loss": 0.3251,
      "step": 38
    },
    {
      "epoch": 0.42391304347826086,
      "grad_norm": 0.784820556640625,
      "learning_rate": 2.4469273743016762e-05,
      "loss": 0.2747,
      "step": 39
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 0.40593597292900085,
      "learning_rate": 2.430167597765363e-05,
      "loss": 0.3375,
      "step": 40
    },
    {
      "epoch": 0.44565217391304346,
      "grad_norm": 0.45933520793914795,
      "learning_rate": 2.4134078212290505e-05,
      "loss": 0.2273,
      "step": 41
    },
    {
      "epoch": 0.45652173913043476,
      "grad_norm": 0.4543314576148987,
      "learning_rate": 2.3966480446927374e-05,
      "loss": 0.2565,
      "step": 42
    },
    {
      "epoch": 0.4673913043478261,
      "grad_norm": 0.5618748068809509,
      "learning_rate": 2.379888268156425e-05,
      "loss": 0.2571,
      "step": 43
    },
    {
      "epoch": 0.4782608695652174,
      "grad_norm": 0.4227646291255951,
      "learning_rate": 2.3631284916201117e-05,
      "loss": 0.3006,
      "step": 44
    },
    {
      "epoch": 0.4891304347826087,
      "grad_norm": 0.4286762475967407,
      "learning_rate": 2.346368715083799e-05,
      "loss": 0.3206,
      "step": 45
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.4858555197715759,
      "learning_rate": 2.329608938547486e-05,
      "loss": 0.1936,
      "step": 46
    },
    {
      "epoch": 0.5108695652173914,
      "grad_norm": 0.36394625902175903,
      "learning_rate": 2.3128491620111732e-05,
      "loss": 0.167,
      "step": 47
    },
    {
      "epoch": 0.5217391304347826,
      "grad_norm": 0.4544750154018402,
      "learning_rate": 2.2960893854748604e-05,
      "loss": 0.3839,
      "step": 48
    },
    {
      "epoch": 0.532608695652174,
      "grad_norm": 1.09042489528656,
      "learning_rate": 2.2793296089385476e-05,
      "loss": 0.2707,
      "step": 49
    },
    {
      "epoch": 0.5434782608695652,
      "grad_norm": 0.3253614604473114,
      "learning_rate": 2.2625698324022348e-05,
      "loss": 0.1884,
      "step": 50
    },
    {
      "epoch": 0.5543478260869565,
      "grad_norm": 0.5140789151191711,
      "learning_rate": 2.245810055865922e-05,
      "loss": 0.2176,
      "step": 51
    },
    {
      "epoch": 0.5652173913043478,
      "grad_norm": 0.3601592779159546,
      "learning_rate": 2.229050279329609e-05,
      "loss": 0.2518,
      "step": 52
    },
    {
      "epoch": 0.5760869565217391,
      "grad_norm": 0.49350816011428833,
      "learning_rate": 2.2122905027932963e-05,
      "loss": 0.2949,
      "step": 53
    },
    {
      "epoch": 0.5869565217391305,
      "grad_norm": 0.3795137107372284,
      "learning_rate": 2.195530726256983e-05,
      "loss": 0.2352,
      "step": 54
    },
    {
      "epoch": 0.5978260869565217,
      "grad_norm": 0.3705698847770691,
      "learning_rate": 2.1787709497206706e-05,
      "loss": 0.2096,
      "step": 55
    },
    {
      "epoch": 0.6086956521739131,
      "grad_norm": 0.2584347724914551,
      "learning_rate": 2.1620111731843575e-05,
      "loss": 0.1232,
      "step": 56
    },
    {
      "epoch": 0.6195652173913043,
      "grad_norm": 0.6062581539154053,
      "learning_rate": 2.1452513966480446e-05,
      "loss": 0.515,
      "step": 57
    },
    {
      "epoch": 0.6304347826086957,
      "grad_norm": 0.5680787563323975,
      "learning_rate": 2.1284916201117318e-05,
      "loss": 0.3798,
      "step": 58
    },
    {
      "epoch": 0.6413043478260869,
      "grad_norm": 0.23708926141262054,
      "learning_rate": 2.111731843575419e-05,
      "loss": 0.175,
      "step": 59
    },
    {
      "epoch": 0.6521739130434783,
      "grad_norm": 0.39348331093788147,
      "learning_rate": 2.0949720670391062e-05,
      "loss": 0.1914,
      "step": 60
    },
    {
      "epoch": 0.6630434782608695,
      "grad_norm": 0.5686835646629333,
      "learning_rate": 2.0782122905027933e-05,
      "loss": 0.3916,
      "step": 61
    },
    {
      "epoch": 0.6739130434782609,
      "grad_norm": 0.5686026215553284,
      "learning_rate": 2.0614525139664805e-05,
      "loss": 0.228,
      "step": 62
    },
    {
      "epoch": 0.6847826086956522,
      "grad_norm": 0.4128269553184509,
      "learning_rate": 2.0446927374301674e-05,
      "loss": 0.2479,
      "step": 63
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 0.4272077679634094,
      "learning_rate": 2.027932960893855e-05,
      "loss": 0.284,
      "step": 64
    },
    {
      "epoch": 0.7065217391304348,
      "grad_norm": 0.5708092451095581,
      "learning_rate": 2.0111731843575417e-05,
      "loss": 0.3804,
      "step": 65
    },
    {
      "epoch": 0.717391304347826,
      "grad_norm": 0.6123507618904114,
      "learning_rate": 1.9944134078212292e-05,
      "loss": 0.2713,
      "step": 66
    },
    {
      "epoch": 0.7282608695652174,
      "grad_norm": 0.3932100236415863,
      "learning_rate": 1.977653631284916e-05,
      "loss": 0.2351,
      "step": 67
    },
    {
      "epoch": 0.7391304347826086,
      "grad_norm": 0.3840418756008148,
      "learning_rate": 1.9608938547486036e-05,
      "loss": 0.2837,
      "step": 68
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.5631595849990845,
      "learning_rate": 1.9441340782122904e-05,
      "loss": 0.188,
      "step": 69
    },
    {
      "epoch": 0.7608695652173914,
      "grad_norm": 0.29784706234931946,
      "learning_rate": 1.927374301675978e-05,
      "loss": 0.1542,
      "step": 70
    },
    {
      "epoch": 0.7717391304347826,
      "grad_norm": 0.34363874793052673,
      "learning_rate": 1.9106145251396648e-05,
      "loss": 0.1668,
      "step": 71
    },
    {
      "epoch": 0.782608695652174,
      "grad_norm": 0.6488922238349915,
      "learning_rate": 1.893854748603352e-05,
      "loss": 0.2076,
      "step": 72
    },
    {
      "epoch": 0.7934782608695652,
      "grad_norm": 0.36249950528144836,
      "learning_rate": 1.877094972067039e-05,
      "loss": 0.2539,
      "step": 73
    },
    {
      "epoch": 0.8043478260869565,
      "grad_norm": 0.6032502055168152,
      "learning_rate": 1.8603351955307263e-05,
      "loss": 0.2912,
      "step": 74
    },
    {
      "epoch": 0.8152173913043478,
      "grad_norm": 0.393883615732193,
      "learning_rate": 1.8435754189944135e-05,
      "loss": 0.2225,
      "step": 75
    },
    {
      "epoch": 0.8260869565217391,
      "grad_norm": 0.35355567932128906,
      "learning_rate": 1.8268156424581006e-05,
      "loss": 0.2471,
      "step": 76
    },
    {
      "epoch": 0.8369565217391305,
      "grad_norm": 0.6523119807243347,
      "learning_rate": 1.8100558659217878e-05,
      "loss": 0.3467,
      "step": 77
    },
    {
      "epoch": 0.8478260869565217,
      "grad_norm": 0.4586833119392395,
      "learning_rate": 1.793296089385475e-05,
      "loss": 0.2891,
      "step": 78
    },
    {
      "epoch": 0.8586956521739131,
      "grad_norm": 0.5058690905570984,
      "learning_rate": 1.776536312849162e-05,
      "loss": 0.2504,
      "step": 79
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 0.3577556014060974,
      "learning_rate": 1.7597765363128493e-05,
      "loss": 0.1831,
      "step": 80
    },
    {
      "epoch": 0.8804347826086957,
      "grad_norm": 0.36402180790901184,
      "learning_rate": 1.7430167597765365e-05,
      "loss": 0.2354,
      "step": 81
    },
    {
      "epoch": 0.8913043478260869,
      "grad_norm": 0.3851219415664673,
      "learning_rate": 1.7262569832402237e-05,
      "loss": 0.2237,
      "step": 82
    },
    {
      "epoch": 0.9021739130434783,
      "grad_norm": 0.4134155511856079,
      "learning_rate": 1.7094972067039105e-05,
      "loss": 0.2284,
      "step": 83
    },
    {
      "epoch": 0.9130434782608695,
      "grad_norm": 0.5397670865058899,
      "learning_rate": 1.6927374301675977e-05,
      "loss": 0.2424,
      "step": 84
    },
    {
      "epoch": 0.9239130434782609,
      "grad_norm": 0.41816258430480957,
      "learning_rate": 1.675977653631285e-05,
      "loss": 0.1897,
      "step": 85
    },
    {
      "epoch": 0.9347826086956522,
      "grad_norm": 0.4885289669036865,
      "learning_rate": 1.659217877094972e-05,
      "loss": 0.2853,
      "step": 86
    },
    {
      "epoch": 0.9456521739130435,
      "grad_norm": 0.4835894703865051,
      "learning_rate": 1.6424581005586592e-05,
      "loss": 0.2612,
      "step": 87
    },
    {
      "epoch": 0.9565217391304348,
      "grad_norm": 0.3384779095649719,
      "learning_rate": 1.6256983240223464e-05,
      "loss": 0.204,
      "step": 88
    },
    {
      "epoch": 0.967391304347826,
      "grad_norm": 0.42653387784957886,
      "learning_rate": 1.6089385474860336e-05,
      "loss": 0.1801,
      "step": 89
    },
    {
      "epoch": 0.9782608695652174,
      "grad_norm": 0.46495750546455383,
      "learning_rate": 1.5921787709497207e-05,
      "loss": 0.3809,
      "step": 90
    },
    {
      "epoch": 0.9891304347826086,
      "grad_norm": 0.7088552117347717,
      "learning_rate": 1.575418994413408e-05,
      "loss": 0.3338,
      "step": 91
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.4994565546512604,
      "learning_rate": 1.5586592178770948e-05,
      "loss": 0.3197,
      "step": 92
    },
    {
      "epoch": 1.0108695652173914,
      "grad_norm": 0.5401270985603333,
      "learning_rate": 1.5418994413407823e-05,
      "loss": 0.3347,
      "step": 93
    },
    {
      "epoch": 1.0217391304347827,
      "grad_norm": 0.30618318915367126,
      "learning_rate": 1.5251396648044693e-05,
      "loss": 0.1709,
      "step": 94
    },
    {
      "epoch": 1.0326086956521738,
      "grad_norm": 0.28916090726852417,
      "learning_rate": 1.5083798882681566e-05,
      "loss": 0.211,
      "step": 95
    },
    {
      "epoch": 1.0434782608695652,
      "grad_norm": 0.36955466866493225,
      "learning_rate": 1.4916201117318435e-05,
      "loss": 0.173,
      "step": 96
    },
    {
      "epoch": 1.0543478260869565,
      "grad_norm": 0.39201200008392334,
      "learning_rate": 1.4748603351955306e-05,
      "loss": 0.3109,
      "step": 97
    },
    {
      "epoch": 1.065217391304348,
      "grad_norm": 0.5664386749267578,
      "learning_rate": 1.4581005586592178e-05,
      "loss": 0.2121,
      "step": 98
    },
    {
      "epoch": 1.0760869565217392,
      "grad_norm": 0.3521777391433716,
      "learning_rate": 1.441340782122905e-05,
      "loss": 0.2422,
      "step": 99
    },
    {
      "epoch": 1.0869565217391304,
      "grad_norm": 0.46047309041023254,
      "learning_rate": 1.4245810055865922e-05,
      "loss": 0.2901,
      "step": 100
    },
    {
      "epoch": 1.0978260869565217,
      "grad_norm": 0.3169825077056885,
      "learning_rate": 1.4078212290502793e-05,
      "loss": 0.167,
      "step": 101
    },
    {
      "epoch": 1.108695652173913,
      "grad_norm": 0.45265427231788635,
      "learning_rate": 1.3910614525139665e-05,
      "loss": 0.2639,
      "step": 102
    },
    {
      "epoch": 1.1195652173913044,
      "grad_norm": 0.2628476619720459,
      "learning_rate": 1.3743016759776537e-05,
      "loss": 0.1409,
      "step": 103
    },
    {
      "epoch": 1.1304347826086956,
      "grad_norm": 0.42469266057014465,
      "learning_rate": 1.3575418994413409e-05,
      "loss": 0.2365,
      "step": 104
    },
    {
      "epoch": 1.141304347826087,
      "grad_norm": 0.554024875164032,
      "learning_rate": 1.3407821229050279e-05,
      "loss": 0.2356,
      "step": 105
    },
    {
      "epoch": 1.1521739130434783,
      "grad_norm": 0.3486219346523285,
      "learning_rate": 1.324022346368715e-05,
      "loss": 0.2101,
      "step": 106
    },
    {
      "epoch": 1.1630434782608696,
      "grad_norm": 0.33959364891052246,
      "learning_rate": 1.3072625698324022e-05,
      "loss": 0.1877,
      "step": 107
    },
    {
      "epoch": 1.1739130434782608,
      "grad_norm": 0.4453425705432892,
      "learning_rate": 1.2905027932960894e-05,
      "loss": 0.254,
      "step": 108
    },
    {
      "epoch": 1.184782608695652,
      "grad_norm": 0.38810184597969055,
      "learning_rate": 1.2737430167597766e-05,
      "loss": 0.1778,
      "step": 109
    },
    {
      "epoch": 1.1956521739130435,
      "grad_norm": 0.28903669118881226,
      "learning_rate": 1.2569832402234637e-05,
      "loss": 0.1444,
      "step": 110
    },
    {
      "epoch": 1.2065217391304348,
      "grad_norm": 0.9634750485420227,
      "learning_rate": 1.2402234636871509e-05,
      "loss": 0.2042,
      "step": 111
    },
    {
      "epoch": 1.2173913043478262,
      "grad_norm": 0.5904025435447693,
      "learning_rate": 1.2234636871508381e-05,
      "loss": 0.5138,
      "step": 112
    },
    {
      "epoch": 1.2282608695652173,
      "grad_norm": 0.36438241600990295,
      "learning_rate": 1.2067039106145253e-05,
      "loss": 0.197,
      "step": 113
    },
    {
      "epoch": 1.2391304347826086,
      "grad_norm": 0.451325386762619,
      "learning_rate": 1.1899441340782124e-05,
      "loss": 0.205,
      "step": 114
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.3305754065513611,
      "learning_rate": 1.1731843575418994e-05,
      "loss": 0.1729,
      "step": 115
    },
    {
      "epoch": 1.2608695652173914,
      "grad_norm": 0.45812341570854187,
      "learning_rate": 1.1564245810055866e-05,
      "loss": 0.2244,
      "step": 116
    },
    {
      "epoch": 1.2717391304347827,
      "grad_norm": 0.3606882691383362,
      "learning_rate": 1.1396648044692738e-05,
      "loss": 0.208,
      "step": 117
    },
    {
      "epoch": 1.2826086956521738,
      "grad_norm": 0.46164172887802124,
      "learning_rate": 1.122905027932961e-05,
      "loss": 0.1948,
      "step": 118
    },
    {
      "epoch": 1.2934782608695652,
      "grad_norm": 0.34960290789604187,
      "learning_rate": 1.1061452513966481e-05,
      "loss": 0.1657,
      "step": 119
    },
    {
      "epoch": 1.3043478260869565,
      "grad_norm": 2.9199092388153076,
      "learning_rate": 1.0893854748603353e-05,
      "loss": 0.205,
      "step": 120
    },
    {
      "epoch": 1.315217391304348,
      "grad_norm": 0.45626726746559143,
      "learning_rate": 1.0726256983240223e-05,
      "loss": 0.2369,
      "step": 121
    },
    {
      "epoch": 1.3260869565217392,
      "grad_norm": 0.3579792082309723,
      "learning_rate": 1.0558659217877095e-05,
      "loss": 0.1428,
      "step": 122
    },
    {
      "epoch": 1.3369565217391304,
      "grad_norm": 0.7473917603492737,
      "learning_rate": 1.0391061452513967e-05,
      "loss": 0.2469,
      "step": 123
    },
    {
      "epoch": 1.3478260869565217,
      "grad_norm": 0.45503562688827515,
      "learning_rate": 1.0223463687150837e-05,
      "loss": 0.2559,
      "step": 124
    },
    {
      "epoch": 1.358695652173913,
      "grad_norm": 0.49676379561424255,
      "learning_rate": 1.0055865921787709e-05,
      "loss": 0.1807,
      "step": 125
    },
    {
      "epoch": 1.3695652173913042,
      "grad_norm": 0.5722432136535645,
      "learning_rate": 9.88826815642458e-06,
      "loss": 0.2907,
      "step": 126
    },
    {
      "epoch": 1.3804347826086958,
      "grad_norm": 0.43931692838668823,
      "learning_rate": 9.720670391061452e-06,
      "loss": 0.2484,
      "step": 127
    },
    {
      "epoch": 1.391304347826087,
      "grad_norm": 0.44338878989219666,
      "learning_rate": 9.553072625698324e-06,
      "loss": 0.1989,
      "step": 128
    },
    {
      "epoch": 1.4021739130434783,
      "grad_norm": 0.3508244752883911,
      "learning_rate": 9.385474860335196e-06,
      "loss": 0.1076,
      "step": 129
    },
    {
      "epoch": 1.4130434782608696,
      "grad_norm": 0.4579634368419647,
      "learning_rate": 9.217877094972067e-06,
      "loss": 0.2697,
      "step": 130
    },
    {
      "epoch": 1.4239130434782608,
      "grad_norm": 0.44404348731040955,
      "learning_rate": 9.050279329608939e-06,
      "loss": 0.1973,
      "step": 131
    },
    {
      "epoch": 1.434782608695652,
      "grad_norm": 0.509229302406311,
      "learning_rate": 8.88268156424581e-06,
      "loss": 0.219,
      "step": 132
    },
    {
      "epoch": 1.4456521739130435,
      "grad_norm": 0.3267776370048523,
      "learning_rate": 8.715083798882683e-06,
      "loss": 0.2308,
      "step": 133
    },
    {
      "epoch": 1.4565217391304348,
      "grad_norm": 0.45265960693359375,
      "learning_rate": 8.547486033519553e-06,
      "loss": 0.1877,
      "step": 134
    },
    {
      "epoch": 1.4673913043478262,
      "grad_norm": 0.4702434539794922,
      "learning_rate": 8.379888268156424e-06,
      "loss": 0.2113,
      "step": 135
    },
    {
      "epoch": 1.4782608695652173,
      "grad_norm": 0.3863651752471924,
      "learning_rate": 8.212290502793296e-06,
      "loss": 0.1779,
      "step": 136
    },
    {
      "epoch": 1.4891304347826086,
      "grad_norm": 0.5470485687255859,
      "learning_rate": 8.044692737430168e-06,
      "loss": 0.237,
      "step": 137
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.45113468170166016,
      "learning_rate": 7.87709497206704e-06,
      "loss": 0.1921,
      "step": 138
    },
    {
      "epoch": 1.5108695652173914,
      "grad_norm": 0.5311570167541504,
      "learning_rate": 7.709497206703911e-06,
      "loss": 0.2078,
      "step": 139
    },
    {
      "epoch": 1.5217391304347827,
      "grad_norm": 0.3963381052017212,
      "learning_rate": 7.541899441340783e-06,
      "loss": 0.1163,
      "step": 140
    },
    {
      "epoch": 1.5326086956521738,
      "grad_norm": 0.34116417169570923,
      "learning_rate": 7.374301675977653e-06,
      "loss": 0.1671,
      "step": 141
    },
    {
      "epoch": 1.5434782608695652,
      "grad_norm": 0.35732734203338623,
      "learning_rate": 7.206703910614525e-06,
      "loss": 0.1619,
      "step": 142
    },
    {
      "epoch": 1.5543478260869565,
      "grad_norm": 0.32546132802963257,
      "learning_rate": 7.039106145251397e-06,
      "loss": 0.1747,
      "step": 143
    },
    {
      "epoch": 1.5652173913043477,
      "grad_norm": 0.42675408720970154,
      "learning_rate": 6.871508379888268e-06,
      "loss": 0.218,
      "step": 144
    },
    {
      "epoch": 1.5760869565217392,
      "grad_norm": 0.5982142686843872,
      "learning_rate": 6.703910614525139e-06,
      "loss": 0.1619,
      "step": 145
    },
    {
      "epoch": 1.5869565217391304,
      "grad_norm": 0.45559588074684143,
      "learning_rate": 6.536312849162011e-06,
      "loss": 0.2048,
      "step": 146
    },
    {
      "epoch": 1.5978260869565217,
      "grad_norm": 0.4953802227973938,
      "learning_rate": 6.368715083798883e-06,
      "loss": 0.3124,
      "step": 147
    },
    {
      "epoch": 1.608695652173913,
      "grad_norm": 0.3565727174282074,
      "learning_rate": 6.2011173184357546e-06,
      "loss": 0.141,
      "step": 148
    },
    {
      "epoch": 1.6195652173913042,
      "grad_norm": 0.4055475890636444,
      "learning_rate": 6.033519553072626e-06,
      "loss": 0.1543,
      "step": 149
    },
    {
      "epoch": 1.6304347826086958,
      "grad_norm": 0.5342203974723816,
      "learning_rate": 5.865921787709497e-06,
      "loss": 0.3022,
      "step": 150
    },
    {
      "epoch": 1.641304347826087,
      "grad_norm": 0.2213074266910553,
      "learning_rate": 5.698324022346369e-06,
      "loss": 0.1157,
      "step": 151
    },
    {
      "epoch": 1.6521739130434783,
      "grad_norm": 0.6883156299591064,
      "learning_rate": 5.530726256983241e-06,
      "loss": 0.1998,
      "step": 152
    },
    {
      "epoch": 1.6630434782608696,
      "grad_norm": 0.3054094910621643,
      "learning_rate": 5.363128491620112e-06,
      "loss": 0.1062,
      "step": 153
    },
    {
      "epoch": 1.6739130434782608,
      "grad_norm": 0.592028021812439,
      "learning_rate": 5.195530726256983e-06,
      "loss": 0.2939,
      "step": 154
    },
    {
      "epoch": 1.6847826086956523,
      "grad_norm": 0.47281980514526367,
      "learning_rate": 5.027932960893854e-06,
      "loss": 0.275,
      "step": 155
    },
    {
      "epoch": 1.6956521739130435,
      "grad_norm": 0.3268674612045288,
      "learning_rate": 4.860335195530726e-06,
      "loss": 0.1296,
      "step": 156
    },
    {
      "epoch": 1.7065217391304348,
      "grad_norm": 0.4468993544578552,
      "learning_rate": 4.692737430167598e-06,
      "loss": 0.2552,
      "step": 157
    },
    {
      "epoch": 1.7173913043478262,
      "grad_norm": 0.4080936312675476,
      "learning_rate": 4.5251396648044695e-06,
      "loss": 0.2028,
      "step": 158
    },
    {
      "epoch": 1.7282608695652173,
      "grad_norm": 0.41294705867767334,
      "learning_rate": 4.357541899441341e-06,
      "loss": 0.2278,
      "step": 159
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 0.38806530833244324,
      "learning_rate": 4.189944134078212e-06,
      "loss": 0.1507,
      "step": 160
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.4726288616657257,
      "learning_rate": 4.022346368715084e-06,
      "loss": 0.3179,
      "step": 161
    },
    {
      "epoch": 1.7608695652173914,
      "grad_norm": 0.4887074828147888,
      "learning_rate": 3.854748603351956e-06,
      "loss": 0.2432,
      "step": 162
    },
    {
      "epoch": 1.7717391304347827,
      "grad_norm": 0.49059274792671204,
      "learning_rate": 3.6871508379888266e-06,
      "loss": 0.1732,
      "step": 163
    },
    {
      "epoch": 1.7826086956521738,
      "grad_norm": 0.4731513261795044,
      "learning_rate": 3.5195530726256983e-06,
      "loss": 0.3172,
      "step": 164
    },
    {
      "epoch": 1.7934782608695652,
      "grad_norm": 0.47785452008247375,
      "learning_rate": 3.3519553072625697e-06,
      "loss": 0.2021,
      "step": 165
    },
    {
      "epoch": 1.8043478260869565,
      "grad_norm": 0.38512149453163147,
      "learning_rate": 3.1843575418994414e-06,
      "loss": 0.1498,
      "step": 166
    },
    {
      "epoch": 1.8152173913043477,
      "grad_norm": 0.529296338558197,
      "learning_rate": 3.016759776536313e-06,
      "loss": 0.2088,
      "step": 167
    },
    {
      "epoch": 1.8260869565217392,
      "grad_norm": 0.3797279894351959,
      "learning_rate": 2.8491620111731845e-06,
      "loss": 0.1462,
      "step": 168
    },
    {
      "epoch": 1.8369565217391304,
      "grad_norm": 0.3825872242450714,
      "learning_rate": 2.681564245810056e-06,
      "loss": 0.2365,
      "step": 169
    },
    {
      "epoch": 1.8478260869565217,
      "grad_norm": 0.45181718468666077,
      "learning_rate": 2.513966480446927e-06,
      "loss": 0.1418,
      "step": 170
    },
    {
      "epoch": 1.858695652173913,
      "grad_norm": 0.6402963399887085,
      "learning_rate": 2.346368715083799e-06,
      "loss": 0.2239,
      "step": 171
    },
    {
      "epoch": 1.8695652173913042,
      "grad_norm": 0.3925747573375702,
      "learning_rate": 2.1787709497206706e-06,
      "loss": 0.2665,
      "step": 172
    },
    {
      "epoch": 1.8804347826086958,
      "grad_norm": 0.49501070380210876,
      "learning_rate": 2.011173184357542e-06,
      "loss": 0.269,
      "step": 173
    },
    {
      "epoch": 1.891304347826087,
      "grad_norm": 0.3930898904800415,
      "learning_rate": 1.8435754189944133e-06,
      "loss": 0.1823,
      "step": 174
    },
    {
      "epoch": 1.9021739130434783,
      "grad_norm": 0.47687312960624695,
      "learning_rate": 1.6759776536312848e-06,
      "loss": 0.235,
      "step": 175
    },
    {
      "epoch": 1.9130434782608696,
      "grad_norm": 0.31379279494285583,
      "learning_rate": 1.5083798882681566e-06,
      "loss": 0.1481,
      "step": 176
    },
    {
      "epoch": 1.9239130434782608,
      "grad_norm": 2.4949660301208496,
      "learning_rate": 1.340782122905028e-06,
      "loss": 0.2184,
      "step": 177
    },
    {
      "epoch": 1.9347826086956523,
      "grad_norm": 0.28981682658195496,
      "learning_rate": 1.1731843575418994e-06,
      "loss": 0.1353,
      "step": 178
    },
    {
      "epoch": 1.9456521739130435,
      "grad_norm": 0.36853376030921936,
      "learning_rate": 1.005586592178771e-06,
      "loss": 0.1959,
      "step": 179
    },
    {
      "epoch": 1.9565217391304348,
      "grad_norm": 0.28629860281944275,
      "learning_rate": 8.379888268156424e-07,
      "loss": 0.143,
      "step": 180
    },
    {
      "epoch": 1.9673913043478262,
      "grad_norm": 0.5044825673103333,
      "learning_rate": 6.70391061452514e-07,
      "loss": 0.2263,
      "step": 181
    },
    {
      "epoch": 1.9782608695652173,
      "grad_norm": 0.3991227447986603,
      "learning_rate": 5.027932960893855e-07,
      "loss": 0.21,
      "step": 182
    },
    {
      "epoch": 1.9891304347826086,
      "grad_norm": 0.5287102460861206,
      "learning_rate": 3.35195530726257e-07,
      "loss": 0.2761,
      "step": 183
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.49904054403305054,
      "learning_rate": 1.675977653631285e-07,
      "loss": 0.1935,
      "step": 184
    }
  ],
  "logging_steps": 1,
  "max_steps": 184,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.747683085492224e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}