| { |
| "best_global_step": null, |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 3.0, |
| "eval_steps": 500, |
| "global_step": 288, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0, |
| "eval_loss": 4.369115829467773, |
| "eval_num_tokens": 0.0, |
| "eval_runtime": 29.5676, |
| "eval_samples_per_second": 74.879, |
| "eval_steps_per_second": 2.367, |
| "step": 0 |
| }, |
| { |
| "epoch": 0.010416666666666666, |
| "grad_norm": 59.074302673339844, |
| "learning_rate": 0.0, |
| "loss": 16.1171, |
| "num_tokens": 1835008.0, |
| "step": 1 |
| }, |
| { |
| "epoch": 0.020833333333333332, |
| "grad_norm": 49.583702087402344, |
| "learning_rate": 1.3793103448275863e-05, |
| "loss": 14.3376, |
| "num_tokens": 3669861.0, |
| "step": 2 |
| }, |
| { |
| "epoch": 0.03125, |
| "grad_norm": 30.194101333618164, |
| "learning_rate": 2.7586206896551727e-05, |
| "loss": 11.9664, |
| "num_tokens": 5504404.0, |
| "step": 3 |
| }, |
| { |
| "epoch": 0.041666666666666664, |
| "grad_norm": 15.582443237304688, |
| "learning_rate": 4.1379310344827587e-05, |
| "loss": 10.0953, |
| "num_tokens": 7338364.0, |
| "step": 4 |
| }, |
| { |
| "epoch": 0.052083333333333336, |
| "grad_norm": 8.575087547302246, |
| "learning_rate": 5.517241379310345e-05, |
| "loss": 9.3311, |
| "num_tokens": 9170987.0, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.0625, |
| "grad_norm": 18.2393798828125, |
| "learning_rate": 6.896551724137931e-05, |
| "loss": 12.5616, |
| "num_tokens": 10988206.0, |
| "step": 6 |
| }, |
| { |
| "epoch": 0.07291666666666667, |
| "grad_norm": 10.826237678527832, |
| "learning_rate": 8.275862068965517e-05, |
| "loss": 10.2014, |
| "num_tokens": 12735535.0, |
| "step": 7 |
| }, |
| { |
| "epoch": 0.08333333333333333, |
| "grad_norm": 8.483694076538086, |
| "learning_rate": 9.655172413793105e-05, |
| "loss": 8.5755, |
| "num_tokens": 14570434.0, |
| "step": 8 |
| }, |
| { |
| "epoch": 0.09375, |
| "grad_norm": 12.725581169128418, |
| "learning_rate": 0.0001103448275862069, |
| "loss": 7.9316, |
| "num_tokens": 16405103.0, |
| "step": 9 |
| }, |
| { |
| "epoch": 0.10416666666666667, |
| "grad_norm": 4.175471782684326, |
| "learning_rate": 0.00012413793103448277, |
| "loss": 7.8781, |
| "num_tokens": 18239342.0, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.11458333333333333, |
| "grad_norm": 3.164414167404175, |
| "learning_rate": 0.00013793103448275863, |
| "loss": 7.5351, |
| "num_tokens": 20072748.0, |
| "step": 11 |
| }, |
| { |
| "epoch": 0.125, |
| "grad_norm": 4.230194568634033, |
| "learning_rate": 0.00015172413793103449, |
| "loss": 7.0739, |
| "num_tokens": 21901095.0, |
| "step": 12 |
| }, |
| { |
| "epoch": 0.13541666666666666, |
| "grad_norm": 6.29382848739624, |
| "learning_rate": 0.00016551724137931035, |
| "loss": 6.7846, |
| "num_tokens": 23677594.0, |
| "step": 13 |
| }, |
| { |
| "epoch": 0.14583333333333334, |
| "grad_norm": 3.8753461837768555, |
| "learning_rate": 0.0001793103448275862, |
| "loss": 7.4952, |
| "num_tokens": 25512546.0, |
| "step": 14 |
| }, |
| { |
| "epoch": 0.15625, |
| "grad_norm": 3.4796786308288574, |
| "learning_rate": 0.0001931034482758621, |
| "loss": 7.3153, |
| "num_tokens": 27347253.0, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.16666666666666666, |
| "grad_norm": 2.67903733253479, |
| "learning_rate": 0.00020689655172413795, |
| "loss": 7.5518, |
| "num_tokens": 29181547.0, |
| "step": 16 |
| }, |
| { |
| "epoch": 0.17708333333333334, |
| "grad_norm": 2.4620280265808105, |
| "learning_rate": 0.0002206896551724138, |
| "loss": 7.2618, |
| "num_tokens": 31015010.0, |
| "step": 17 |
| }, |
| { |
| "epoch": 0.1875, |
| "grad_norm": 1.8145155906677246, |
| "learning_rate": 0.00023448275862068965, |
| "loss": 6.7349, |
| "num_tokens": 32845451.0, |
| "step": 18 |
| }, |
| { |
| "epoch": 0.19791666666666666, |
| "grad_norm": 3.3343679904937744, |
| "learning_rate": 0.00024827586206896553, |
| "loss": 5.6373, |
| "num_tokens": 34610156.0, |
| "step": 19 |
| }, |
| { |
| "epoch": 0.20833333333333334, |
| "grad_norm": 2.3187055587768555, |
| "learning_rate": 0.00026206896551724137, |
| "loss": 7.0746, |
| "num_tokens": 36445159.0, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.21875, |
| "grad_norm": 2.6275839805603027, |
| "learning_rate": 0.00027586206896551725, |
| "loss": 7.2533, |
| "num_tokens": 38279897.0, |
| "step": 21 |
| }, |
| { |
| "epoch": 0.22916666666666666, |
| "grad_norm": 1.9879604578018188, |
| "learning_rate": 0.00028965517241379314, |
| "loss": 7.107, |
| "num_tokens": 40114299.0, |
| "step": 22 |
| }, |
| { |
| "epoch": 0.23958333333333334, |
| "grad_norm": 2.4393796920776367, |
| "learning_rate": 0.00030344827586206897, |
| "loss": 7.0006, |
| "num_tokens": 41948073.0, |
| "step": 23 |
| }, |
| { |
| "epoch": 0.25, |
| "grad_norm": 2.269523859024048, |
| "learning_rate": 0.00031724137931034486, |
| "loss": 6.9205, |
| "num_tokens": 43779853.0, |
| "step": 24 |
| }, |
| { |
| "epoch": 0.2604166666666667, |
| "grad_norm": 2.820974588394165, |
| "learning_rate": 0.0003310344827586207, |
| "loss": 4.5921, |
| "num_tokens": 45542999.0, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.2708333333333333, |
| "grad_norm": 2.643603801727295, |
| "learning_rate": 0.0003448275862068965, |
| "loss": 6.8071, |
| "num_tokens": 47378007.0, |
| "step": 26 |
| }, |
| { |
| "epoch": 0.28125, |
| "grad_norm": 2.0994789600372314, |
| "learning_rate": 0.0003586206896551724, |
| "loss": 7.0465, |
| "num_tokens": 49212861.0, |
| "step": 27 |
| }, |
| { |
| "epoch": 0.2916666666666667, |
| "grad_norm": 2.2678847312927246, |
| "learning_rate": 0.0003724137931034483, |
| "loss": 7.3026, |
| "num_tokens": 51047392.0, |
| "step": 28 |
| }, |
| { |
| "epoch": 0.3020833333333333, |
| "grad_norm": 1.6589086055755615, |
| "learning_rate": 0.0003862068965517242, |
| "loss": 7.0678, |
| "num_tokens": 52881351.0, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.3125, |
| "grad_norm": 1.8188447952270508, |
| "learning_rate": 0.0004, |
| "loss": 6.9163, |
| "num_tokens": 54714034.0, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.3229166666666667, |
| "grad_norm": 2.9304006099700928, |
| "learning_rate": 0.000399985287214871, |
| "loss": 5.6612, |
| "num_tokens": 56533703.0, |
| "step": 31 |
| }, |
| { |
| "epoch": 0.3333333333333333, |
| "grad_norm": 2.9317057132720947, |
| "learning_rate": 0.00039994115102414443, |
| "loss": 6.0697, |
| "num_tokens": 58295030.0, |
| "step": 32 |
| }, |
| { |
| "epoch": 0.34375, |
| "grad_norm": 2.4129488468170166, |
| "learning_rate": 0.0003998675979214832, |
| "loss": 6.8908, |
| "num_tokens": 60129965.0, |
| "step": 33 |
| }, |
| { |
| "epoch": 0.3541666666666667, |
| "grad_norm": 3.096867084503174, |
| "learning_rate": 0.0003997646387285973, |
| "loss": 7.3034, |
| "num_tokens": 61964623.0, |
| "step": 34 |
| }, |
| { |
| "epoch": 0.3645833333333333, |
| "grad_norm": 2.161679744720459, |
| "learning_rate": 0.0003996322885936515, |
| "loss": 6.8816, |
| "num_tokens": 63798869.0, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.375, |
| "grad_norm": 1.8905034065246582, |
| "learning_rate": 0.00039947056698903674, |
| "loss": 6.8637, |
| "num_tokens": 65632233.0, |
| "step": 36 |
| }, |
| { |
| "epoch": 0.3854166666666667, |
| "grad_norm": 1.978013277053833, |
| "learning_rate": 0.0003992794977085053, |
| "loss": 6.5995, |
| "num_tokens": 67461424.0, |
| "step": 37 |
| }, |
| { |
| "epoch": 0.3958333333333333, |
| "grad_norm": 5.106634616851807, |
| "learning_rate": 0.0003990591088636698, |
| "loss": 5.2285, |
| "num_tokens": 69229515.0, |
| "step": 38 |
| }, |
| { |
| "epoch": 0.40625, |
| "grad_norm": 2.3611295223236084, |
| "learning_rate": 0.0003988094328798676, |
| "loss": 6.8329, |
| "num_tokens": 71064475.0, |
| "step": 39 |
| }, |
| { |
| "epoch": 0.4166666666666667, |
| "grad_norm": 1.393893837928772, |
| "learning_rate": 0.00039853050649138943, |
| "loss": 7.1497, |
| "num_tokens": 72899173.0, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.4270833333333333, |
| "grad_norm": 1.8147592544555664, |
| "learning_rate": 0.00039822237073607573, |
| "loss": 7.0103, |
| "num_tokens": 74733506.0, |
| "step": 41 |
| }, |
| { |
| "epoch": 0.4375, |
| "grad_norm": 1.375647783279419, |
| "learning_rate": 0.0003978850709492779, |
| "loss": 6.8966, |
| "num_tokens": 76567089.0, |
| "step": 42 |
| }, |
| { |
| "epoch": 0.4479166666666667, |
| "grad_norm": 1.8051817417144775, |
| "learning_rate": 0.000397518656757189, |
| "loss": 6.5224, |
| "num_tokens": 78397511.0, |
| "step": 43 |
| }, |
| { |
| "epoch": 0.4583333333333333, |
| "grad_norm": 1.5351046323776245, |
| "learning_rate": 0.0003971231820695417, |
| "loss": 4.4472, |
| "num_tokens": 80155993.0, |
| "step": 44 |
| }, |
| { |
| "epoch": 0.46875, |
| "grad_norm": 1.944486379623413, |
| "learning_rate": 0.000396698705071677, |
| "loss": 6.4955, |
| "num_tokens": 81991001.0, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.4791666666666667, |
| "grad_norm": 1.94477379322052, |
| "learning_rate": 0.0003962452882159836, |
| "loss": 6.9783, |
| "num_tokens": 83825755.0, |
| "step": 46 |
| }, |
| { |
| "epoch": 0.4895833333333333, |
| "grad_norm": 1.7476928234100342, |
| "learning_rate": 0.0003957629982127092, |
| "loss": 7.0479, |
| "num_tokens": 85660194.0, |
| "step": 47 |
| }, |
| { |
| "epoch": 0.5, |
| "grad_norm": 1.5802652835845947, |
| "learning_rate": 0.00039525190602014563, |
| "loss": 6.7967, |
| "num_tokens": 87494025.0, |
| "step": 48 |
| }, |
| { |
| "epoch": 0.5104166666666666, |
| "grad_norm": 1.353808045387268, |
| "learning_rate": 0.00039471208683418895, |
| "loss": 6.5968, |
| "num_tokens": 89326018.0, |
| "step": 49 |
| }, |
| { |
| "epoch": 0.5208333333333334, |
| "grad_norm": 1.2980613708496094, |
| "learning_rate": 0.00039414362007727616, |
| "loss": 4.6137, |
| "num_tokens": 91084381.0, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.53125, |
| "grad_norm": 1.5357342958450317, |
| "learning_rate": 0.0003935465893866998, |
| "loss": 6.4328, |
| "num_tokens": 92919389.0, |
| "step": 51 |
| }, |
| { |
| "epoch": 0.5416666666666666, |
| "grad_norm": 2.6804656982421875, |
| "learning_rate": 0.0003929210826023024, |
| "loss": 6.8429, |
| "num_tokens": 94754271.0, |
| "step": 52 |
| }, |
| { |
| "epoch": 0.5520833333333334, |
| "grad_norm": 1.387039303779602, |
| "learning_rate": 0.00039226719175355316, |
| "loss": 6.9105, |
| "num_tokens": 96588840.0, |
| "step": 53 |
| }, |
| { |
| "epoch": 0.5625, |
| "grad_norm": 2.764413356781006, |
| "learning_rate": 0.0003915850130460076, |
| "loss": 7.0522, |
| "num_tokens": 98422941.0, |
| "step": 54 |
| }, |
| { |
| "epoch": 0.5729166666666666, |
| "grad_norm": 1.4721662998199463, |
| "learning_rate": 0.00039087464684715325, |
| "loss": 6.8557, |
| "num_tokens": 100255975.0, |
| "step": 55 |
| }, |
| { |
| "epoch": 0.5833333333333334, |
| "grad_norm": 2.418269395828247, |
| "learning_rate": 0.0003901361976716425, |
| "loss": 5.7605, |
| "num_tokens": 102078387.0, |
| "step": 56 |
| }, |
| { |
| "epoch": 0.59375, |
| "grad_norm": 2.094545602798462, |
| "learning_rate": 0.0003893697741659158, |
| "loss": 5.8441, |
| "num_tokens": 103850401.0, |
| "step": 57 |
| }, |
| { |
| "epoch": 0.6041666666666666, |
| "grad_norm": 2.203214645385742, |
| "learning_rate": 0.00038857548909221687, |
| "loss": 6.6665, |
| "num_tokens": 105685318.0, |
| "step": 58 |
| }, |
| { |
| "epoch": 0.6145833333333334, |
| "grad_norm": 1.7559891939163208, |
| "learning_rate": 0.00038775345931200175, |
| "loss": 6.9711, |
| "num_tokens": 107519970.0, |
| "step": 59 |
| }, |
| { |
| "epoch": 0.625, |
| "grad_norm": 2.541926860809326, |
| "learning_rate": 0.00038690380576874585, |
| "loss": 6.8984, |
| "num_tokens": 109354164.0, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.6354166666666666, |
| "grad_norm": 1.761542797088623, |
| "learning_rate": 0.0003860266534701491, |
| "loss": 6.7849, |
| "num_tokens": 111187370.0, |
| "step": 61 |
| }, |
| { |
| "epoch": 0.6458333333333334, |
| "grad_norm": 1.7331949472427368, |
| "learning_rate": 0.00038512213146974476, |
| "loss": 5.7054, |
| "num_tokens": 113012336.0, |
| "step": 62 |
| }, |
| { |
| "epoch": 0.65625, |
| "grad_norm": 1.1224706172943115, |
| "learning_rate": 0.00038419037284791093, |
| "loss": 5.0973, |
| "num_tokens": 114767666.0, |
| "step": 63 |
| }, |
| { |
| "epoch": 0.6666666666666666, |
| "grad_norm": 1.7167904376983643, |
| "learning_rate": 0.0003832315146922917, |
| "loss": 6.5043, |
| "num_tokens": 116602636.0, |
| "step": 64 |
| }, |
| { |
| "epoch": 0.6770833333333334, |
| "grad_norm": 1.7402077913284302, |
| "learning_rate": 0.0003822456980776272, |
| "loss": 6.7829, |
| "num_tokens": 118437323.0, |
| "step": 65 |
| }, |
| { |
| "epoch": 0.6875, |
| "grad_norm": 1.7316051721572876, |
| "learning_rate": 0.00038123306804499756, |
| "loss": 6.8131, |
| "num_tokens": 120271633.0, |
| "step": 66 |
| }, |
| { |
| "epoch": 0.6979166666666666, |
| "grad_norm": 1.579223871231079, |
| "learning_rate": 0.0003801937735804838, |
| "loss": 6.6637, |
| "num_tokens": 122105100.0, |
| "step": 67 |
| }, |
| { |
| "epoch": 0.7083333333333334, |
| "grad_norm": 1.7695430517196655, |
| "learning_rate": 0.0003791279675932473, |
| "loss": 6.311, |
| "num_tokens": 123935290.0, |
| "step": 68 |
| }, |
| { |
| "epoch": 0.71875, |
| "grad_norm": 1.319337248802185, |
| "learning_rate": 0.0003780358068930329, |
| "loss": 4.6914, |
| "num_tokens": 125671781.0, |
| "step": 69 |
| }, |
| { |
| "epoch": 0.7291666666666666, |
| "grad_norm": 1.9782460927963257, |
| "learning_rate": 0.00037691745216709754, |
| "loss": 6.4582, |
| "num_tokens": 127506783.0, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.7395833333333334, |
| "grad_norm": 1.099218726158142, |
| "learning_rate": 0.0003757730679565692, |
| "loss": 6.5452, |
| "num_tokens": 129341530.0, |
| "step": 71 |
| }, |
| { |
| "epoch": 0.75, |
| "grad_norm": 2.3009560108184814, |
| "learning_rate": 0.00037460282263223764, |
| "loss": 6.787, |
| "num_tokens": 131175924.0, |
| "step": 72 |
| }, |
| { |
| "epoch": 0.7604166666666666, |
| "grad_norm": 1.8008896112442017, |
| "learning_rate": 0.000373406888369783, |
| "loss": 6.7155, |
| "num_tokens": 133009659.0, |
| "step": 73 |
| }, |
| { |
| "epoch": 0.7708333333333334, |
| "grad_norm": 1.6004728078842163, |
| "learning_rate": 0.00037218544112444375, |
| "loss": 6.4821, |
| "num_tokens": 134841522.0, |
| "step": 74 |
| }, |
| { |
| "epoch": 0.78125, |
| "grad_norm": 1.2067142724990845, |
| "learning_rate": 0.00037093866060512834, |
| "loss": 3.9541, |
| "num_tokens": 136607712.0, |
| "step": 75 |
| }, |
| { |
| "epoch": 0.7916666666666666, |
| "grad_norm": 1.222769856452942, |
| "learning_rate": 0.0003696667302479757, |
| "loss": 6.4826, |
| "num_tokens": 138442720.0, |
| "step": 76 |
| }, |
| { |
| "epoch": 0.8020833333333334, |
| "grad_norm": 2.030263900756836, |
| "learning_rate": 0.00036836983718936624, |
| "loss": 6.7221, |
| "num_tokens": 140277585.0, |
| "step": 77 |
| }, |
| { |
| "epoch": 0.8125, |
| "grad_norm": 1.1827516555786133, |
| "learning_rate": 0.00036704817223838905, |
| "loss": 6.97, |
| "num_tokens": 142112123.0, |
| "step": 78 |
| }, |
| { |
| "epoch": 0.8229166666666666, |
| "grad_norm": 1.8860087394714355, |
| "learning_rate": 0.00036570192984876847, |
| "loss": 6.776, |
| "num_tokens": 143946109.0, |
| "step": 79 |
| }, |
| { |
| "epoch": 0.8333333333333334, |
| "grad_norm": 1.259444236755371, |
| "learning_rate": 0.0003643313080902546, |
| "loss": 6.2958, |
| "num_tokens": 145778859.0, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.84375, |
| "grad_norm": 1.2004989385604858, |
| "learning_rate": 0.0003629365086194818, |
| "loss": 5.1451, |
| "num_tokens": 147600412.0, |
| "step": 81 |
| }, |
| { |
| "epoch": 0.8541666666666666, |
| "grad_norm": 0.9654695987701416, |
| "learning_rate": 0.000361517736650299, |
| "loss": 5.7, |
| "num_tokens": 149380352.0, |
| "step": 82 |
| }, |
| { |
| "epoch": 0.8645833333333334, |
| "grad_norm": 1.1376981735229492, |
| "learning_rate": 0.00036007520092357765, |
| "loss": 6.4369, |
| "num_tokens": 151215279.0, |
| "step": 83 |
| }, |
| { |
| "epoch": 0.875, |
| "grad_norm": 1.1264456510543823, |
| "learning_rate": 0.00035860911367649955, |
| "loss": 6.7863, |
| "num_tokens": 153049931.0, |
| "step": 84 |
| }, |
| { |
| "epoch": 0.8854166666666666, |
| "grad_norm": 1.1658936738967896, |
| "learning_rate": 0.0003571196906113313, |
| "loss": 6.6682, |
| "num_tokens": 154884077.0, |
| "step": 85 |
| }, |
| { |
| "epoch": 0.8958333333333334, |
| "grad_norm": 1.4664779901504517, |
| "learning_rate": 0.00035560715086368794, |
| "loss": 6.5316, |
| "num_tokens": 156717218.0, |
| "step": 86 |
| }, |
| { |
| "epoch": 0.90625, |
| "grad_norm": 1.0775753259658813, |
| "learning_rate": 0.00035407171697029267, |
| "loss": 5.7963, |
| "num_tokens": 158544449.0, |
| "step": 87 |
| }, |
| { |
| "epoch": 0.9166666666666666, |
| "grad_norm": 0.9657304883003235, |
| "learning_rate": 0.0003525136148362349, |
| "loss": 4.7755, |
| "num_tokens": 160281956.0, |
| "step": 88 |
| }, |
| { |
| "epoch": 0.9270833333333334, |
| "grad_norm": 1.1560213565826416, |
| "learning_rate": 0.0003509330737017339, |
| "loss": 6.6126, |
| "num_tokens": 162116927.0, |
| "step": 89 |
| }, |
| { |
| "epoch": 0.9375, |
| "grad_norm": 1.2897595167160034, |
| "learning_rate": 0.0003493303261084105, |
| "loss": 6.7477, |
| "num_tokens": 163951622.0, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.9479166666666666, |
| "grad_norm": 1.758039951324463, |
| "learning_rate": 0.0003477056078650743, |
| "loss": 6.7067, |
| "num_tokens": 165785936.0, |
| "step": 91 |
| }, |
| { |
| "epoch": 0.9583333333333334, |
| "grad_norm": 1.2501920461654663, |
| "learning_rate": 0.0003460591580130295, |
| "loss": 6.4496, |
| "num_tokens": 167619514.0, |
| "step": 92 |
| }, |
| { |
| "epoch": 0.96875, |
| "grad_norm": 1.465528130531311, |
| "learning_rate": 0.0003443912187909049, |
| "loss": 6.4476, |
| "num_tokens": 169450177.0, |
| "step": 93 |
| }, |
| { |
| "epoch": 0.9791666666666666, |
| "grad_norm": 1.0671991109848022, |
| "learning_rate": 0.00034270203559901447, |
| "loss": 4.1311, |
| "num_tokens": 171227988.0, |
| "step": 94 |
| }, |
| { |
| "epoch": 0.9895833333333334, |
| "grad_norm": 1.8749101161956787, |
| "learning_rate": 0.0003409918569632517, |
| "loss": 6.7588, |
| "num_tokens": 173062297.0, |
| "step": 95 |
| }, |
| { |
| "epoch": 1.0, |
| "grad_norm": 0.9690254330635071, |
| "learning_rate": 0.00033926093449852444, |
| "loss": 5.5985, |
| "num_tokens": 174873718.0, |
| "step": 96 |
| }, |
| { |
| "epoch": 1.0, |
| "eval_loss": 0.3894410729408264, |
| "eval_num_tokens": 174873718.0, |
| "eval_runtime": 30.0859, |
| "eval_samples_per_second": 73.589, |
| "eval_steps_per_second": 2.327, |
| "step": 96 |
| }, |
| { |
| "epoch": 1.0104166666666667, |
| "grad_norm": 1.3122996091842651, |
| "learning_rate": 0.00033750952287173576, |
| "loss": 5.7378, |
| "num_tokens": 176708726.0, |
| "step": 97 |
| }, |
| { |
| "epoch": 1.0208333333333333, |
| "grad_norm": 1.4362525939941406, |
| "learning_rate": 0.00033573787976431507, |
| "loss": 5.8922, |
| "num_tokens": 178543577.0, |
| "step": 98 |
| }, |
| { |
| "epoch": 1.03125, |
| "grad_norm": 1.188515067100525, |
| "learning_rate": 0.00033394626583430596, |
| "loss": 5.9712, |
| "num_tokens": 180378152.0, |
| "step": 99 |
| }, |
| { |
| "epoch": 1.0416666666666667, |
| "grad_norm": 1.1332318782806396, |
| "learning_rate": 0.0003321349446780163, |
| "loss": 6.0482, |
| "num_tokens": 182212167.0, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.0520833333333333, |
| "grad_norm": 1.1256293058395386, |
| "learning_rate": 0.0003303041827912359, |
| "loss": 5.65, |
| "num_tokens": 184044813.0, |
| "step": 101 |
| }, |
| { |
| "epoch": 1.0625, |
| "grad_norm": 1.1897281408309937, |
| "learning_rate": 0.0003284542495300272, |
| "loss": 4.2973, |
| "num_tokens": 185861645.0, |
| "step": 102 |
| }, |
| { |
| "epoch": 1.0729166666666667, |
| "grad_norm": 1.16110098361969, |
| "learning_rate": 0.00032658541707109614, |
| "loss": 5.0971, |
| "num_tokens": 187638317.0, |
| "step": 103 |
| }, |
| { |
| "epoch": 1.0833333333333333, |
| "grad_norm": 1.1937158107757568, |
| "learning_rate": 0.00032469796037174674, |
| "loss": 5.793, |
| "num_tokens": 189473221.0, |
| "step": 104 |
| }, |
| { |
| "epoch": 1.09375, |
| "grad_norm": 1.033004641532898, |
| "learning_rate": 0.00032279215712942755, |
| "loss": 5.8289, |
| "num_tokens": 191307863.0, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.1041666666666667, |
| "grad_norm": 1.2670934200286865, |
| "learning_rate": 0.0003208682877408746, |
| "loss": 5.9546, |
| "num_tokens": 193142059.0, |
| "step": 106 |
| }, |
| { |
| "epoch": 1.1145833333333333, |
| "grad_norm": 1.0049296617507935, |
| "learning_rate": 0.00031892663526085735, |
| "loss": 5.7162, |
| "num_tokens": 194975275.0, |
| "step": 107 |
| }, |
| { |
| "epoch": 1.125, |
| "grad_norm": 0.9133859872817993, |
| "learning_rate": 0.00031696748536053294, |
| "loss": 5.1661, |
| "num_tokens": 196803152.0, |
| "step": 108 |
| }, |
| { |
| "epoch": 1.1354166666666667, |
| "grad_norm": 0.7779310345649719, |
| "learning_rate": 0.0003149911262854166, |
| "loss": 4.029, |
| "num_tokens": 198575028.0, |
| "step": 109 |
| }, |
| { |
| "epoch": 1.1458333333333333, |
| "grad_norm": 0.9244178533554077, |
| "learning_rate": 0.00031299784881297246, |
| "loss": 5.6349, |
| "num_tokens": 200409979.0, |
| "step": 110 |
| }, |
| { |
| "epoch": 1.15625, |
| "grad_norm": 0.9925755858421326, |
| "learning_rate": 0.0003109879462098321, |
| "loss": 5.8685, |
| "num_tokens": 202244654.0, |
| "step": 111 |
| }, |
| { |
| "epoch": 1.1666666666666667, |
| "grad_norm": 1.3348231315612793, |
| "learning_rate": 0.00030896171418864677, |
| "loss": 6.0688, |
| "num_tokens": 204078947.0, |
| "step": 112 |
| }, |
| { |
| "epoch": 1.1770833333333333, |
| "grad_norm": 0.9792305827140808, |
| "learning_rate": 0.00030691945086458, |
| "loss": 5.6361, |
| "num_tokens": 205912447.0, |
| "step": 113 |
| }, |
| { |
| "epoch": 1.1875, |
| "grad_norm": 0.8863200545310974, |
| "learning_rate": 0.00030486145671144635, |
| "loss": 5.4361, |
| "num_tokens": 207742842.0, |
| "step": 114 |
| }, |
| { |
| "epoch": 1.1979166666666667, |
| "grad_norm": 1.1211588382720947, |
| "learning_rate": 0.0003027880345175036, |
| "loss": 3.709, |
| "num_tokens": 209498845.0, |
| "step": 115 |
| }, |
| { |
| "epoch": 1.2083333333333333, |
| "grad_norm": 1.335170030593872, |
| "learning_rate": 0.000300699489340904, |
| "loss": 5.7128, |
| "num_tokens": 211333853.0, |
| "step": 116 |
| }, |
| { |
| "epoch": 1.21875, |
| "grad_norm": 1.0909032821655273, |
| "learning_rate": 0.00029859612846481164, |
| "loss": 5.8812, |
| "num_tokens": 213168584.0, |
| "step": 117 |
| }, |
| { |
| "epoch": 1.2291666666666667, |
| "grad_norm": 1.0990113019943237, |
| "learning_rate": 0.00029647826135219274, |
| "loss": 5.9633, |
| "num_tokens": 215002979.0, |
| "step": 118 |
| }, |
| { |
| "epoch": 1.2395833333333333, |
| "grad_norm": 1.090796709060669, |
| "learning_rate": 0.0002943461996002849, |
| "loss": 5.8026, |
| "num_tokens": 216836768.0, |
| "step": 119 |
| }, |
| { |
| "epoch": 1.25, |
| "grad_norm": 0.928265392780304, |
| "learning_rate": 0.00029220025689475243, |
| "loss": 5.5874, |
| "num_tokens": 218668686.0, |
| "step": 120 |
| }, |
| { |
| "epoch": 1.2604166666666667, |
| "grad_norm": 0.9441875219345093, |
| "learning_rate": 0.00029004074896353467, |
| "loss": 3.7368, |
| "num_tokens": 220419497.0, |
| "step": 121 |
| }, |
| { |
| "epoch": 1.2708333333333333, |
| "grad_norm": 1.2522954940795898, |
| "learning_rate": 0.00028786799353039335, |
| "loss": 5.5598, |
| "num_tokens": 222254505.0, |
| "step": 122 |
| }, |
| { |
| "epoch": 1.28125, |
| "grad_norm": 1.1094729900360107, |
| "learning_rate": 0.00028568231026816673, |
| "loss": 5.953, |
| "num_tokens": 224089351.0, |
| "step": 123 |
| }, |
| { |
| "epoch": 1.2916666666666667, |
| "grad_norm": 1.1398054361343384, |
| "learning_rate": 0.00028348402075173683, |
| "loss": 6.1392, |
| "num_tokens": 225923865.0, |
| "step": 124 |
| }, |
| { |
| "epoch": 1.3020833333333333, |
| "grad_norm": 1.0812050104141235, |
| "learning_rate": 0.0002812734484107166, |
| "loss": 5.9734, |
| "num_tokens": 227757870.0, |
| "step": 125 |
| }, |
| { |
| "epoch": 1.3125, |
| "grad_norm": 1.1087563037872314, |
| "learning_rate": 0.00027905091848186476, |
| "loss": 5.7871, |
| "num_tokens": 229590660.0, |
| "step": 126 |
| }, |
| { |
| "epoch": 1.3229166666666667, |
| "grad_norm": 0.9097115993499756, |
| "learning_rate": 0.00027681675796123424, |
| "loss": 4.1933, |
| "num_tokens": 231409634.0, |
| "step": 127 |
| }, |
| { |
| "epoch": 1.3333333333333333, |
| "grad_norm": 1.0159878730773926, |
| "learning_rate": 0.00027457129555606176, |
| "loss": 4.9435, |
| "num_tokens": 233161046.0, |
| "step": 128 |
| }, |
| { |
| "epoch": 1.34375, |
| "grad_norm": 0.8809158205986023, |
| "learning_rate": 0.00027231486163640617, |
| "loss": 5.705, |
| "num_tokens": 234995958.0, |
| "step": 129 |
| }, |
| { |
| "epoch": 1.3541666666666667, |
| "grad_norm": 1.0225216150283813, |
| "learning_rate": 0.00027004778818654173, |
| "loss": 5.8159, |
| "num_tokens": 236830612.0, |
| "step": 130 |
| }, |
| { |
| "epoch": 1.3645833333333333, |
| "grad_norm": 0.8581401109695435, |
| "learning_rate": 0.0002677704087561138, |
| "loss": 5.8147, |
| "num_tokens": 238664763.0, |
| "step": 131 |
| }, |
| { |
| "epoch": 1.375, |
| "grad_norm": 0.9191629886627197, |
| "learning_rate": 0.0002654830584110645, |
| "loss": 5.5898, |
| "num_tokens": 240497790.0, |
| "step": 132 |
| }, |
| { |
| "epoch": 1.3854166666666667, |
| "grad_norm": 0.7045547962188721, |
| "learning_rate": 0.0002631860736843352, |
| "loss": 4.9866, |
| "num_tokens": 242324229.0, |
| "step": 133 |
| }, |
| { |
| "epoch": 1.3958333333333333, |
| "grad_norm": 0.969253659248352, |
| "learning_rate": 0.00026087979252635335, |
| "loss": 3.9635, |
| "num_tokens": 244080663.0, |
| "step": 134 |
| }, |
| { |
| "epoch": 1.40625, |
| "grad_norm": 1.0385013818740845, |
| "learning_rate": 0.0002585645542553101, |
| "loss": 5.6901, |
| "num_tokens": 245915632.0, |
| "step": 135 |
| }, |
| { |
| "epoch": 1.4166666666666667, |
| "grad_norm": 0.8449141979217529, |
| "learning_rate": 0.0002562406995072375, |
| "loss": 6.1177, |
| "num_tokens": 247750331.0, |
| "step": 136 |
| }, |
| { |
| "epoch": 1.4270833333333333, |
| "grad_norm": 1.043533444404602, |
| "learning_rate": 0.00025390857018589135, |
| "loss": 5.8914, |
| "num_tokens": 249584639.0, |
| "step": 137 |
| }, |
| { |
| "epoch": 1.4375, |
| "grad_norm": 0.7260875701904297, |
| "learning_rate": 0.0002515685094124476, |
| "loss": 5.8013, |
| "num_tokens": 251418209.0, |
| "step": 138 |
| }, |
| { |
| "epoch": 1.4479166666666667, |
| "grad_norm": 1.4319143295288086, |
| "learning_rate": 0.00024922086147501977, |
| "loss": 5.4356, |
| "num_tokens": 253248977.0, |
| "step": 139 |
| }, |
| { |
| "epoch": 1.4583333333333333, |
| "grad_norm": 0.9194077849388123, |
| "learning_rate": 0.0002468659717780045, |
| "loss": 3.8217, |
| "num_tokens": 255022934.0, |
| "step": 140 |
| }, |
| { |
| "epoch": 1.46875, |
| "grad_norm": 0.960200846195221, |
| "learning_rate": 0.0002445041867912629, |
| "loss": 5.472, |
| "num_tokens": 256857942.0, |
| "step": 141 |
| }, |
| { |
| "epoch": 1.4791666666666667, |
| "grad_norm": 0.8519755601882935, |
| "learning_rate": 0.00024213585399914526, |
| "loss": 6.1054, |
| "num_tokens": 258692708.0, |
| "step": 142 |
| }, |
| { |
| "epoch": 1.4895833333333333, |
| "grad_norm": 0.9014315605163574, |
| "learning_rate": 0.00023976132184936648, |
| "loss": 5.9878, |
| "num_tokens": 260527136.0, |
| "step": 143 |
| }, |
| { |
| "epoch": 1.5, |
| "grad_norm": 0.8403896689414978, |
| "learning_rate": 0.00023738093970173955, |
| "loss": 5.8222, |
| "num_tokens": 262360894.0, |
| "step": 144 |
| }, |
| { |
| "epoch": 1.5104166666666665, |
| "grad_norm": 0.8343173861503601, |
| "learning_rate": 0.00023499505777677509, |
| "loss": 5.4862, |
| "num_tokens": 264192721.0, |
| "step": 145 |
| }, |
| { |
| "epoch": 1.5208333333333335, |
| "grad_norm": 0.7444643378257751, |
| "learning_rate": 0.00023260402710415418, |
| "loss": 3.7015, |
| "num_tokens": 265947523.0, |
| "step": 146 |
| }, |
| { |
| "epoch": 1.53125, |
| "grad_norm": 0.9729010462760925, |
| "learning_rate": 0.000230208199471082, |
| "loss": 5.4485, |
| "num_tokens": 267782531.0, |
| "step": 147 |
| }, |
| { |
| "epoch": 1.5416666666666665, |
| "grad_norm": 0.7512543797492981, |
| "learning_rate": 0.00022780792737053034, |
| "loss": 5.8811, |
| "num_tokens": 269617364.0, |
| "step": 148 |
| }, |
| { |
| "epoch": 1.5520833333333335, |
| "grad_norm": 3.1332297325134277, |
| "learning_rate": 0.00022540356394937577, |
| "loss": 6.0501, |
| "num_tokens": 271451886.0, |
| "step": 149 |
| }, |
| { |
| "epoch": 1.5625, |
| "grad_norm": 0.9920795559883118, |
| "learning_rate": 0.00022299546295644223, |
| "loss": 6.1201, |
| "num_tokens": 273285914.0, |
| "step": 150 |
| }, |
| { |
| "epoch": 1.5729166666666665, |
| "grad_norm": 0.8172993659973145, |
| "learning_rate": 0.0002205839786904545, |
| "loss": 5.7807, |
| "num_tokens": 275118700.0, |
| "step": 151 |
| }, |
| { |
| "epoch": 1.5833333333333335, |
| "grad_norm": 0.7441129684448242, |
| "learning_rate": 0.00021816946594791102, |
| "loss": 4.3491, |
| "num_tokens": 276940098.0, |
| "step": 152 |
| }, |
| { |
| "epoch": 1.59375, |
| "grad_norm": 0.7993663549423218, |
| "learning_rate": 0.0002157522799708836, |
| "loss": 4.9735, |
| "num_tokens": 278712550.0, |
| "step": 153 |
| }, |
| { |
| "epoch": 1.6041666666666665, |
| "grad_norm": 0.8530566692352295, |
| "learning_rate": 0.00021333277639475106, |
| "loss": 5.7049, |
| "num_tokens": 280547478.0, |
| "step": 154 |
| }, |
| { |
| "epoch": 1.6145833333333335, |
| "grad_norm": 2.344912052154541, |
| "learning_rate": 0.0002109113111958759, |
| "loss": 5.953, |
| "num_tokens": 282382117.0, |
| "step": 155 |
| }, |
| { |
| "epoch": 1.625, |
| "grad_norm": 0.9242584109306335, |
| "learning_rate": 0.00020848824063922968, |
| "loss": 6.0551, |
| "num_tokens": 284216279.0, |
| "step": 156 |
| }, |
| { |
| "epoch": 1.6354166666666665, |
| "grad_norm": 0.7540212869644165, |
| "learning_rate": 0.000206063921225977, |
| "loss": 5.7292, |
| "num_tokens": 286049489.0, |
| "step": 157 |
| }, |
| { |
| "epoch": 1.6458333333333335, |
| "grad_norm": 0.7506616115570068, |
| "learning_rate": 0.00020363870964102394, |
| "loss": 4.8463, |
| "num_tokens": 287875672.0, |
| "step": 158 |
| }, |
| { |
| "epoch": 1.65625, |
| "grad_norm": 0.6766217350959778, |
| "learning_rate": 0.00020121296270053996, |
| "loss": 3.9504, |
| "num_tokens": 289638765.0, |
| "step": 159 |
| }, |
| { |
| "epoch": 1.6666666666666665, |
| "grad_norm": 0.8203697800636292, |
| "learning_rate": 0.00019878703729946012, |
| "loss": 5.651, |
| "num_tokens": 291473726.0, |
| "step": 160 |
| }, |
| { |
| "epoch": 1.6770833333333335, |
| "grad_norm": 0.8603063821792603, |
| "learning_rate": 0.0001963612903589761, |
| "loss": 5.7, |
| "num_tokens": 293308430.0, |
| "step": 161 |
| }, |
| { |
| "epoch": 1.6875, |
| "grad_norm": 0.868766725063324, |
| "learning_rate": 0.00019393607877402308, |
| "loss": 5.9626, |
| "num_tokens": 295142781.0, |
| "step": 162 |
| }, |
| { |
| "epoch": 1.6979166666666665, |
| "grad_norm": 0.8472209572792053, |
| "learning_rate": 0.0001915117593607704, |
| "loss": 5.5787, |
| "num_tokens": 296976392.0, |
| "step": 163 |
| }, |
| { |
| "epoch": 1.7083333333333335, |
| "grad_norm": 0.8462531566619873, |
| "learning_rate": 0.00018908868880412421, |
| "loss": 5.4115, |
| "num_tokens": 298807511.0, |
| "step": 164 |
| }, |
| { |
| "epoch": 1.71875, |
| "grad_norm": 0.7758305668830872, |
| "learning_rate": 0.000186667223605249, |
| "loss": 3.5523, |
| "num_tokens": 300577163.0, |
| "step": 165 |
| }, |
| { |
| "epoch": 1.7291666666666665, |
| "grad_norm": 0.994538426399231, |
| "learning_rate": 0.00018424772002911653, |
| "loss": 5.631, |
| "num_tokens": 302412171.0, |
| "step": 166 |
| }, |
| { |
| "epoch": 1.7395833333333335, |
| "grad_norm": 0.8604474663734436, |
| "learning_rate": 0.00018183053405208897, |
| "loss": 6.0156, |
| "num_tokens": 304246992.0, |
| "step": 167 |
| }, |
| { |
| "epoch": 1.75, |
| "grad_norm": 0.8348606824874878, |
| "learning_rate": 0.00017941602130954552, |
| "loss": 5.9339, |
| "num_tokens": 306081466.0, |
| "step": 168 |
| }, |
| { |
| "epoch": 1.7604166666666665, |
| "grad_norm": 0.8924731612205505, |
| "learning_rate": 0.0001770045370435578, |
| "loss": 5.7583, |
| "num_tokens": 307915323.0, |
| "step": 169 |
| }, |
| { |
| "epoch": 1.7708333333333335, |
| "grad_norm": 0.8333231210708618, |
| "learning_rate": 0.00017459643605062424, |
| "loss": 5.5967, |
| "num_tokens": 309747303.0, |
| "step": 170 |
| }, |
| { |
| "epoch": 1.78125, |
| "grad_norm": 0.7601279616355896, |
| "learning_rate": 0.00017219207262946973, |
| "loss": 3.6716, |
| "num_tokens": 311490666.0, |
| "step": 171 |
| }, |
| { |
| "epoch": 1.7916666666666665, |
| "grad_norm": 0.8671200275421143, |
| "learning_rate": 0.00016979180052891803, |
| "loss": 5.5499, |
| "num_tokens": 313325674.0, |
| "step": 172 |
| }, |
| { |
| "epoch": 1.8020833333333335, |
| "grad_norm": 0.7336496114730835, |
| "learning_rate": 0.00016739597289584587, |
| "loss": 5.6142, |
| "num_tokens": 315160558.0, |
| "step": 173 |
| }, |
| { |
| "epoch": 1.8125, |
| "grad_norm": 0.8112435340881348, |
| "learning_rate": 0.00016500494222322496, |
| "loss": 5.9554, |
| "num_tokens": 316995117.0, |
| "step": 174 |
| }, |
| { |
| "epoch": 1.8229166666666665, |
| "grad_norm": 0.7531298398971558, |
| "learning_rate": 0.0001626190602982605, |
| "loss": 5.9446, |
| "num_tokens": 318829146.0, |
| "step": 175 |
| }, |
| { |
| "epoch": 1.8333333333333335, |
| "grad_norm": 0.6773476600646973, |
| "learning_rate": 0.00016023867815063357, |
| "loss": 5.5058, |
| "num_tokens": 320661993.0, |
| "step": 176 |
| }, |
| { |
| "epoch": 1.84375, |
| "grad_norm": 0.6899229884147644, |
| "learning_rate": 0.0001578641460008548, |
| "loss": 4.4915, |
| "num_tokens": 322481903.0, |
| "step": 177 |
| }, |
| { |
| "epoch": 1.8541666666666665, |
| "grad_norm": 0.6477121710777283, |
| "learning_rate": 0.00015549581320873715, |
| "loss": 4.7383, |
| "num_tokens": 324251706.0, |
| "step": 178 |
| }, |
| { |
| "epoch": 1.8645833333333335, |
| "grad_norm": 0.6988059282302856, |
| "learning_rate": 0.00015313402822199554, |
| "loss": 5.755, |
| "num_tokens": 326086618.0, |
| "step": 179 |
| }, |
| { |
| "epoch": 1.875, |
| "grad_norm": 0.6862794160842896, |
| "learning_rate": 0.00015077913852498028, |
| "loss": 5.845, |
| "num_tokens": 327921258.0, |
| "step": 180 |
| }, |
| { |
| "epoch": 1.8854166666666665, |
| "grad_norm": 0.7074963450431824, |
| "learning_rate": 0.00014843149058755246, |
| "loss": 5.8368, |
| "num_tokens": 329755417.0, |
| "step": 181 |
| }, |
| { |
| "epoch": 1.8958333333333335, |
| "grad_norm": 0.7663176655769348, |
| "learning_rate": 0.00014609142981410867, |
| "loss": 5.6174, |
| "num_tokens": 331588668.0, |
| "step": 182 |
| }, |
| { |
| "epoch": 1.90625, |
| "grad_norm": 0.5961424708366394, |
| "learning_rate": 0.00014375930049276254, |
| "loss": 4.7983, |
| "num_tokens": 333416028.0, |
| "step": 183 |
| }, |
| { |
| "epoch": 1.9166666666666665, |
| "grad_norm": 0.6379178166389465, |
| "learning_rate": 0.00014143544574468994, |
| "loss": 4.3137, |
| "num_tokens": 335156923.0, |
| "step": 184 |
| }, |
| { |
| "epoch": 1.9270833333333335, |
| "grad_norm": 0.800960898399353, |
| "learning_rate": 0.0001391202074736467, |
| "loss": 5.7612, |
| "num_tokens": 336991901.0, |
| "step": 185 |
| }, |
| { |
| "epoch": 1.9375, |
| "grad_norm": 0.8264267444610596, |
| "learning_rate": 0.00013681392631566478, |
| "loss": 5.888, |
| "num_tokens": 338826608.0, |
| "step": 186 |
| }, |
| { |
| "epoch": 1.9479166666666665, |
| "grad_norm": 0.7922364473342896, |
| "learning_rate": 0.00013451694158893556, |
| "loss": 5.817, |
| "num_tokens": 340660903.0, |
| "step": 187 |
| }, |
| { |
| "epoch": 1.9583333333333335, |
| "grad_norm": 0.8555144667625427, |
| "learning_rate": 0.00013222959124388627, |
| "loss": 5.7542, |
| "num_tokens": 342494421.0, |
| "step": 188 |
| }, |
| { |
| "epoch": 1.96875, |
| "grad_norm": 0.691791832447052, |
| "learning_rate": 0.0001299522118134583, |
| "loss": 5.2777, |
| "num_tokens": 344324800.0, |
| "step": 189 |
| }, |
| { |
| "epoch": 1.9791666666666665, |
| "grad_norm": 0.655918300151825, |
| "learning_rate": 0.00012768513836359382, |
| "loss": 3.5671, |
| "num_tokens": 346097221.0, |
| "step": 190 |
| }, |
| { |
| "epoch": 1.9895833333333335, |
| "grad_norm": 0.9303130507469177, |
| "learning_rate": 0.00012542870444393831, |
| "loss": 5.6141, |
| "num_tokens": 347931646.0, |
| "step": 191 |
| }, |
| { |
| "epoch": 2.0, |
| "grad_norm": 0.7559423446655273, |
| "learning_rate": 0.00012318324203876584, |
| "loss": 4.6556, |
| "num_tokens": 349747436.0, |
| "step": 192 |
| }, |
| { |
| "epoch": 2.0, |
| "eval_loss": 0.29636919498443604, |
| "eval_num_tokens": 349747436.0, |
| "eval_runtime": 30.075, |
| "eval_samples_per_second": 73.616, |
| "eval_steps_per_second": 2.328, |
| "step": 192 |
| }, |
| { |
| "epoch": 2.0104166666666665, |
| "grad_norm": 0.7736667990684509, |
| "learning_rate": 0.00012094908151813528, |
| "loss": 4.8043, |
| "num_tokens": 351582444.0, |
| "step": 193 |
| }, |
| { |
| "epoch": 2.0208333333333335, |
| "grad_norm": 0.8747148513793945, |
| "learning_rate": 0.00011872655158928347, |
| "loss": 5.0255, |
| "num_tokens": 353417307.0, |
| "step": 194 |
| }, |
| { |
| "epoch": 2.03125, |
| "grad_norm": 0.8613985776901245, |
| "learning_rate": 0.00011651597924826328, |
| "loss": 5.0108, |
| "num_tokens": 355251875.0, |
| "step": 195 |
| }, |
| { |
| "epoch": 2.0416666666666665, |
| "grad_norm": 0.8411298990249634, |
| "learning_rate": 0.00011431768973183325, |
| "loss": 4.96, |
| "num_tokens": 357085971.0, |
| "step": 196 |
| }, |
| { |
| "epoch": 2.0520833333333335, |
| "grad_norm": 1.0294160842895508, |
| "learning_rate": 0.00011213200646960665, |
| "loss": 4.7198, |
| "num_tokens": 358919037.0, |
| "step": 197 |
| }, |
| { |
| "epoch": 2.0625, |
| "grad_norm": 0.7702895998954773, |
| "learning_rate": 0.00010995925103646532, |
| "loss": 3.7543, |
| "num_tokens": 360740077.0, |
| "step": 198 |
| }, |
| { |
| "epoch": 2.0729166666666665, |
| "grad_norm": 0.7157151699066162, |
| "learning_rate": 0.00010779974310524759, |
| "loss": 4.2185, |
| "num_tokens": 362500547.0, |
| "step": 199 |
| }, |
| { |
| "epoch": 2.0833333333333335, |
| "grad_norm": 0.7544373273849487, |
| "learning_rate": 0.00010565380039971513, |
| "loss": 4.8134, |
| "num_tokens": 364335437.0, |
| "step": 200 |
| }, |
| { |
| "epoch": 2.09375, |
| "grad_norm": 0.8497442603111267, |
| "learning_rate": 0.0001035217386478073, |
| "loss": 4.9739, |
| "num_tokens": 366170041.0, |
| "step": 201 |
| }, |
| { |
| "epoch": 2.1041666666666665, |
| "grad_norm": 0.9067506790161133, |
| "learning_rate": 0.00010140387153518838, |
| "loss": 4.9253, |
| "num_tokens": 368004177.0, |
| "step": 202 |
| }, |
| { |
| "epoch": 2.1145833333333335, |
| "grad_norm": 0.7545438408851624, |
| "learning_rate": 9.930051065909602e-05, |
| "loss": 4.8793, |
| "num_tokens": 369837409.0, |
| "step": 203 |
| }, |
| { |
| "epoch": 2.125, |
| "grad_norm": 0.6745933890342712, |
| "learning_rate": 9.721196548249643e-05, |
| "loss": 4.22, |
| "num_tokens": 371664713.0, |
| "step": 204 |
| }, |
| { |
| "epoch": 2.1354166666666665, |
| "grad_norm": 0.7445581555366516, |
| "learning_rate": 9.51385432885537e-05, |
| "loss": 3.3814, |
| "num_tokens": 373423781.0, |
| "step": 205 |
| }, |
| { |
| "epoch": 2.1458333333333335, |
| "grad_norm": 0.8161273002624512, |
| "learning_rate": 9.308054913542008e-05, |
| "loss": 4.681, |
| "num_tokens": 375258743.0, |
| "step": 206 |
| }, |
| { |
| "epoch": 2.15625, |
| "grad_norm": 0.7370818257331848, |
| "learning_rate": 9.10382858113533e-05, |
| "loss": 4.822, |
| "num_tokens": 377093447.0, |
| "step": 207 |
| }, |
| { |
| "epoch": 2.1666666666666665, |
| "grad_norm": 0.7451015710830688, |
| "learning_rate": 8.901205379016797e-05, |
| "loss": 5.0376, |
| "num_tokens": 378927784.0, |
| "step": 208 |
| }, |
| { |
| "epoch": 2.1770833333333335, |
| "grad_norm": 0.7379900217056274, |
| "learning_rate": 8.70021511870276e-05, |
| "loss": 4.6836, |
| "num_tokens": 380761347.0, |
| "step": 209 |
| }, |
| { |
| "epoch": 2.1875, |
| "grad_norm": 0.6172227263450623, |
| "learning_rate": 8.500887371458339e-05, |
| "loss": 4.3498, |
| "num_tokens": 382591921.0, |
| "step": 210 |
| }, |
| { |
| "epoch": 2.1979166666666665, |
| "grad_norm": 0.6761332750320435, |
| "learning_rate": 8.303251463946708e-05, |
| "loss": 3.0509, |
| "num_tokens": 384351003.0, |
| "step": 211 |
| }, |
| { |
| "epoch": 2.2083333333333335, |
| "grad_norm": 0.7814252376556396, |
| "learning_rate": 8.107336473914268e-05, |
| "loss": 4.5578, |
| "num_tokens": 386186011.0, |
| "step": 212 |
| }, |
| { |
| "epoch": 2.21875, |
| "grad_norm": 0.7495741248130798, |
| "learning_rate": 7.913171225912536e-05, |
| "loss": 4.7592, |
| "num_tokens": 388020770.0, |
| "step": 213 |
| }, |
| { |
| "epoch": 2.2291666666666665, |
| "grad_norm": 0.6622648239135742, |
| "learning_rate": 7.720784287057247e-05, |
| "loss": 4.9266, |
| "num_tokens": 389855172.0, |
| "step": 214 |
| }, |
| { |
| "epoch": 2.2395833333333335, |
| "grad_norm": 0.6832115054130554, |
| "learning_rate": 7.530203962825331e-05, |
| "loss": 4.7274, |
| "num_tokens": 391688847.0, |
| "step": 215 |
| }, |
| { |
| "epoch": 2.25, |
| "grad_norm": 0.6572868227958679, |
| "learning_rate": 7.34145829289039e-05, |
| "loss": 4.6805, |
| "num_tokens": 393520369.0, |
| "step": 216 |
| }, |
| { |
| "epoch": 2.2604166666666665, |
| "grad_norm": 0.7744179368019104, |
| "learning_rate": 7.154575046997282e-05, |
| "loss": 2.8113, |
| "num_tokens": 395290365.0, |
| "step": 217 |
| }, |
| { |
| "epoch": 2.2708333333333335, |
| "grad_norm": 0.6295082569122314, |
| "learning_rate": 6.969581720876419e-05, |
| "loss": 4.5662, |
| "num_tokens": 397125373.0, |
| "step": 218 |
| }, |
| { |
| "epoch": 2.28125, |
| "grad_norm": 0.6685689091682434, |
| "learning_rate": 6.786505532198374e-05, |
| "loss": 4.7907, |
| "num_tokens": 398960238.0, |
| "step": 219 |
| }, |
| { |
| "epoch": 2.2916666666666665, |
| "grad_norm": 0.6909879446029663, |
| "learning_rate": 6.605373416569411e-05, |
| "loss": 5.1753, |
| "num_tokens": 400794764.0, |
| "step": 220 |
| }, |
| { |
| "epoch": 2.3020833333333335, |
| "grad_norm": 0.6844865083694458, |
| "learning_rate": 6.4262120235685e-05, |
| "loss": 4.8683, |
| "num_tokens": 402628787.0, |
| "step": 221 |
| }, |
| { |
| "epoch": 2.3125, |
| "grad_norm": 0.6655349135398865, |
| "learning_rate": 6.249047712826433e-05, |
| "loss": 4.702, |
| "num_tokens": 404461539.0, |
| "step": 222 |
| }, |
| { |
| "epoch": 2.3229166666666665, |
| "grad_norm": 0.585425078868866, |
| "learning_rate": 6.073906550147566e-05, |
| "loss": 3.3928, |
| "num_tokens": 406279737.0, |
| "step": 223 |
| }, |
| { |
| "epoch": 2.3333333333333335, |
| "grad_norm": 0.5273310542106628, |
| "learning_rate": 5.900814303674842e-05, |
| "loss": 3.9698, |
| "num_tokens": 408070479.0, |
| "step": 224 |
| }, |
| { |
| "epoch": 2.34375, |
| "grad_norm": 0.618398129940033, |
| "learning_rate": 5.729796440098554e-05, |
| "loss": 4.7176, |
| "num_tokens": 409905369.0, |
| "step": 225 |
| }, |
| { |
| "epoch": 2.3541666666666665, |
| "grad_norm": 0.6818743944168091, |
| "learning_rate": 5.5608781209095114e-05, |
| "loss": 5.0674, |
| "num_tokens": 411739974.0, |
| "step": 226 |
| }, |
| { |
| "epoch": 2.3645833333333335, |
| "grad_norm": 0.6323108673095703, |
| "learning_rate": 5.394084198697056e-05, |
| "loss": 4.9298, |
| "num_tokens": 413574139.0, |
| "step": 227 |
| }, |
| { |
| "epoch": 2.375, |
| "grad_norm": 1.0775671005249023, |
| "learning_rate": 5.2294392134925704e-05, |
| "loss": 4.7817, |
| "num_tokens": 415407400.0, |
| "step": 228 |
| }, |
| { |
| "epoch": 2.3854166666666665, |
| "grad_norm": 0.5481901168823242, |
| "learning_rate": 5.066967389158954e-05, |
| "loss": 4.0538, |
| "num_tokens": 417234576.0, |
| "step": 229 |
| }, |
| { |
| "epoch": 2.3958333333333335, |
| "grad_norm": 0.5929054617881775, |
| "learning_rate": 4.9066926298266146e-05, |
| "loss": 3.3325, |
| "num_tokens": 418999605.0, |
| "step": 230 |
| }, |
| { |
| "epoch": 2.40625, |
| "grad_norm": 0.6156002283096313, |
| "learning_rate": 4.748638516376511e-05, |
| "loss": 4.7056, |
| "num_tokens": 420834554.0, |
| "step": 231 |
| }, |
| { |
| "epoch": 2.4166666666666665, |
| "grad_norm": 0.6369039416313171, |
| "learning_rate": 4.592828302970742e-05, |
| "loss": 4.9342, |
| "num_tokens": 422669261.0, |
| "step": 232 |
| }, |
| { |
| "epoch": 2.4270833333333335, |
| "grad_norm": 0.6354939341545105, |
| "learning_rate": 4.4392849136312145e-05, |
| "loss": 4.8467, |
| "num_tokens": 424503575.0, |
| "step": 233 |
| }, |
| { |
| "epoch": 2.4375, |
| "grad_norm": 0.6102489829063416, |
| "learning_rate": 4.288030938866881e-05, |
| "loss": 4.8394, |
| "num_tokens": 426337040.0, |
| "step": 234 |
| }, |
| { |
| "epoch": 2.4479166666666665, |
| "grad_norm": 0.5746884942054749, |
| "learning_rate": 4.13908863235005e-05, |
| "loss": 4.4795, |
| "num_tokens": 428167608.0, |
| "step": 235 |
| }, |
| { |
| "epoch": 2.4583333333333335, |
| "grad_norm": 0.7082644701004028, |
| "learning_rate": 3.9924799076422414e-05, |
| "loss": 2.9116, |
| "num_tokens": 429903970.0, |
| "step": 236 |
| }, |
| { |
| "epoch": 2.46875, |
| "grad_norm": 0.6072261929512024, |
| "learning_rate": 3.8482263349701084e-05, |
| "loss": 4.6498, |
| "num_tokens": 431738978.0, |
| "step": 237 |
| }, |
| { |
| "epoch": 2.4791666666666665, |
| "grad_norm": 0.6212684512138367, |
| "learning_rate": 3.706349138051828e-05, |
| "loss": 4.8417, |
| "num_tokens": 433573808.0, |
| "step": 238 |
| }, |
| { |
| "epoch": 2.4895833333333335, |
| "grad_norm": 0.6547099947929382, |
| "learning_rate": 3.5668691909745425e-05, |
| "loss": 5.1912, |
| "num_tokens": 435408292.0, |
| "step": 239 |
| }, |
| { |
| "epoch": 2.5, |
| "grad_norm": 0.6425914764404297, |
| "learning_rate": 3.429807015123159e-05, |
| "loss": 4.6938, |
| "num_tokens": 437242213.0, |
| "step": 240 |
| }, |
| { |
| "epoch": 2.5104166666666665, |
| "grad_norm": 0.6143184304237366, |
| "learning_rate": 3.295182776161103e-05, |
| "loss": 4.5731, |
| "num_tokens": 439074391.0, |
| "step": 241 |
| }, |
| { |
| "epoch": 2.5208333333333335, |
| "grad_norm": 0.676337480545044, |
| "learning_rate": 3.1630162810633824e-05, |
| "loss": 3.0502, |
| "num_tokens": 440837083.0, |
| "step": 242 |
| }, |
| { |
| "epoch": 2.53125, |
| "grad_norm": 0.5644184947013855, |
| "learning_rate": 3.0333269752024374e-05, |
| "loss": 4.4939, |
| "num_tokens": 442672091.0, |
| "step": 243 |
| }, |
| { |
| "epoch": 2.5416666666666665, |
| "grad_norm": 0.6266845464706421, |
| "learning_rate": 2.9061339394871723e-05, |
| "loss": 4.7266, |
| "num_tokens": 444506945.0, |
| "step": 244 |
| }, |
| { |
| "epoch": 2.5520833333333335, |
| "grad_norm": 0.6021518111228943, |
| "learning_rate": 2.7814558875556305e-05, |
| "loss": 4.8807, |
| "num_tokens": 446341484.0, |
| "step": 245 |
| }, |
| { |
| "epoch": 2.5625, |
| "grad_norm": 0.6042218804359436, |
| "learning_rate": 2.659311163021694e-05, |
| "loss": 4.8493, |
| "num_tokens": 448175477.0, |
| "step": 246 |
| }, |
| { |
| "epoch": 2.5729166666666665, |
| "grad_norm": 0.6365882754325867, |
| "learning_rate": 2.539717736776237e-05, |
| "loss": 4.7206, |
| "num_tokens": 450008278.0, |
| "step": 247 |
| }, |
| { |
| "epoch": 2.5833333333333335, |
| "grad_norm": 0.6301949620246887, |
| "learning_rate": 2.422693204343085e-05, |
| "loss": 3.6696, |
| "num_tokens": 451829106.0, |
| "step": 248 |
| }, |
| { |
| "epoch": 2.59375, |
| "grad_norm": 0.5256466865539551, |
| "learning_rate": 2.308254783290247e-05, |
| "loss": 3.8564, |
| "num_tokens": 453596312.0, |
| "step": 249 |
| }, |
| { |
| "epoch": 2.6041666666666665, |
| "grad_norm": 0.599315881729126, |
| "learning_rate": 2.1964193106967135e-05, |
| "loss": 4.7615, |
| "num_tokens": 455431228.0, |
| "step": 250 |
| }, |
| { |
| "epoch": 2.6145833333333335, |
| "grad_norm": 0.6404199004173279, |
| "learning_rate": 2.0872032406752686e-05, |
| "loss": 4.8996, |
| "num_tokens": 457265875.0, |
| "step": 251 |
| }, |
| { |
| "epoch": 2.625, |
| "grad_norm": 0.602830708026886, |
| "learning_rate": 1.9806226419516192e-05, |
| "loss": 4.9976, |
| "num_tokens": 459100000.0, |
| "step": 252 |
| }, |
| { |
| "epoch": 2.6354166666666665, |
| "grad_norm": 0.5732765793800354, |
| "learning_rate": 1.8766931955002455e-05, |
| "loss": 4.6165, |
| "num_tokens": 460933143.0, |
| "step": 253 |
| }, |
| { |
| "epoch": 2.6458333333333335, |
| "grad_norm": 0.5466137528419495, |
| "learning_rate": 1.775430192237284e-05, |
| "loss": 4.0161, |
| "num_tokens": 462759637.0, |
| "step": 254 |
| }, |
| { |
| "epoch": 2.65625, |
| "grad_norm": 0.6525053381919861, |
| "learning_rate": 1.6768485307708292e-05, |
| "loss": 3.1332, |
| "num_tokens": 464521875.0, |
| "step": 255 |
| }, |
| { |
| "epoch": 2.6666666666666665, |
| "grad_norm": 0.5667853951454163, |
| "learning_rate": 1.5809627152089068e-05, |
| "loss": 4.5937, |
| "num_tokens": 466356841.0, |
| "step": 256 |
| }, |
| { |
| "epoch": 2.6770833333333335, |
| "grad_norm": 0.623957097530365, |
| "learning_rate": 1.4877868530255279e-05, |
| "loss": 4.902, |
| "num_tokens": 468191542.0, |
| "step": 257 |
| }, |
| { |
| "epoch": 2.6875, |
| "grad_norm": 0.5726503729820251, |
| "learning_rate": 1.3973346529850872e-05, |
| "loss": 4.8586, |
| "num_tokens": 470025845.0, |
| "step": 258 |
| }, |
| { |
| "epoch": 2.6979166666666665, |
| "grad_norm": 0.6280783414840698, |
| "learning_rate": 1.3096194231254212e-05, |
| "loss": 4.6986, |
| "num_tokens": 471859357.0, |
| "step": 259 |
| }, |
| { |
| "epoch": 2.7083333333333335, |
| "grad_norm": 0.5749726295471191, |
| "learning_rate": 1.2246540687998264e-05, |
| "loss": 4.3927, |
| "num_tokens": 473689544.0, |
| "step": 260 |
| }, |
| { |
| "epoch": 2.71875, |
| "grad_norm": 0.7261371612548828, |
| "learning_rate": 1.142451090778316e-05, |
| "loss": 2.8678, |
| "num_tokens": 475418561.0, |
| "step": 261 |
| }, |
| { |
| "epoch": 2.7291666666666665, |
| "grad_norm": 0.5542764067649841, |
| "learning_rate": 1.0630225834084196e-05, |
| "loss": 4.7789, |
| "num_tokens": 477253569.0, |
| "step": 262 |
| }, |
| { |
| "epoch": 2.7395833333333335, |
| "grad_norm": 0.5651426911354065, |
| "learning_rate": 9.86380232835753e-06, |
| "loss": 4.7485, |
| "num_tokens": 479088363.0, |
| "step": 263 |
| }, |
| { |
| "epoch": 2.75, |
| "grad_norm": 0.5900463461875916, |
| "learning_rate": 9.125353152846794e-06, |
| "loss": 4.9906, |
| "num_tokens": 480922811.0, |
| "step": 264 |
| }, |
| { |
| "epoch": 2.7604166666666665, |
| "grad_norm": 1.269209861755371, |
| "learning_rate": 8.41498695399241e-06, |
| "loss": 4.9856, |
| "num_tokens": 482756611.0, |
| "step": 265 |
| }, |
| { |
| "epoch": 2.7708333333333335, |
| "grad_norm": 0.5450060367584229, |
| "learning_rate": 7.732808246446887e-06, |
| "loss": 4.4725, |
| "num_tokens": 484588510.0, |
| "step": 266 |
| }, |
| { |
| "epoch": 2.78125, |
| "grad_norm": 0.6516556143760681, |
| "learning_rate": 7.07891739769766e-06, |
| "loss": 2.9581, |
| "num_tokens": 486359532.0, |
| "step": 267 |
| }, |
| { |
| "epoch": 2.7916666666666665, |
| "grad_norm": 0.5549366474151611, |
| "learning_rate": 6.453410613300226e-06, |
| "loss": 4.5659, |
| "num_tokens": 488194540.0, |
| "step": 268 |
| }, |
| { |
| "epoch": 2.8020833333333335, |
| "grad_norm": 0.5634523034095764, |
| "learning_rate": 5.856379922723809e-06, |
| "loss": 4.7761, |
| "num_tokens": 490029403.0, |
| "step": 269 |
| }, |
| { |
| "epoch": 2.8125, |
| "grad_norm": 0.5804418325424194, |
| "learning_rate": 5.2879131658110535e-06, |
| "loss": 4.8276, |
| "num_tokens": 491863932.0, |
| "step": 270 |
| }, |
| { |
| "epoch": 2.8229166666666665, |
| "grad_norm": 0.6010308265686035, |
| "learning_rate": 4.748093979854429e-06, |
| "loss": 4.7014, |
| "num_tokens": 493697972.0, |
| "step": 271 |
| }, |
| { |
| "epoch": 2.8333333333333335, |
| "grad_norm": 0.5664845108985901, |
| "learning_rate": 4.237001787290851e-06, |
| "loss": 4.6276, |
| "num_tokens": 495530692.0, |
| "step": 272 |
| }, |
| { |
| "epoch": 2.84375, |
| "grad_norm": 0.6984363198280334, |
| "learning_rate": 3.754711784016407e-06, |
| "loss": 3.4077, |
| "num_tokens": 497346740.0, |
| "step": 273 |
| }, |
| { |
| "epoch": 2.8541666666666665, |
| "grad_norm": 0.5046265721321106, |
| "learning_rate": 3.3012949283229845e-06, |
| "loss": 3.868, |
| "num_tokens": 499103794.0, |
| "step": 274 |
| }, |
| { |
| "epoch": 2.8645833333333335, |
| "grad_norm": 1.1513460874557495, |
| "learning_rate": 2.8768179304583085e-06, |
| "loss": 4.6177, |
| "num_tokens": 500938681.0, |
| "step": 275 |
| }, |
| { |
| "epoch": 2.875, |
| "grad_norm": 0.8701567649841309, |
| "learning_rate": 2.481343242810996e-06, |
| "loss": 4.7644, |
| "num_tokens": 502773322.0, |
| "step": 276 |
| }, |
| { |
| "epoch": 2.8854166666666665, |
| "grad_norm": 0.6001777052879333, |
| "learning_rate": 2.1149290507220808e-06, |
| "loss": 4.8029, |
| "num_tokens": 504607491.0, |
| "step": 277 |
| }, |
| { |
| "epoch": 2.8958333333333335, |
| "grad_norm": 0.5649986863136292, |
| "learning_rate": 1.7776292639243074e-06, |
| "loss": 4.6981, |
| "num_tokens": 506440687.0, |
| "step": 278 |
| }, |
| { |
| "epoch": 2.90625, |
| "grad_norm": 0.5244946479797363, |
| "learning_rate": 1.4694935086105865e-06, |
| "loss": 3.9951, |
| "num_tokens": 508268714.0, |
| "step": 279 |
| }, |
| { |
| "epoch": 2.9166666666666665, |
| "grad_norm": 0.6372843980789185, |
| "learning_rate": 1.1905671201324576e-06, |
| "loss": 3.2659, |
| "num_tokens": 510061733.0, |
| "step": 280 |
| }, |
| { |
| "epoch": 2.9270833333333335, |
| "grad_norm": 0.595647394657135, |
| "learning_rate": 9.408911363301576e-07, |
| "loss": 4.4732, |
| "num_tokens": 511896706.0, |
| "step": 281 |
| }, |
| { |
| "epoch": 2.9375, |
| "grad_norm": 0.6154844760894775, |
| "learning_rate": 7.205022914946957e-07, |
| "loss": 5.0028, |
| "num_tokens": 513731401.0, |
| "step": 282 |
| }, |
| { |
| "epoch": 2.9479166666666665, |
| "grad_norm": 0.577378511428833, |
| "learning_rate": 5.29433010963265e-07, |
| "loss": 4.9479, |
| "num_tokens": 515565715.0, |
| "step": 283 |
| }, |
| { |
| "epoch": 2.9583333333333335, |
| "grad_norm": 0.6034528613090515, |
| "learning_rate": 3.677114063485476e-07, |
| "loss": 4.7164, |
| "num_tokens": 517399223.0, |
| "step": 284 |
| }, |
| { |
| "epoch": 2.96875, |
| "grad_norm": 0.5388755202293396, |
| "learning_rate": 2.3536127140273422e-07, |
| "loss": 4.3787, |
| "num_tokens": 519229822.0, |
| "step": 285 |
| }, |
| { |
| "epoch": 2.9791666666666665, |
| "grad_norm": 0.6161792874336243, |
| "learning_rate": 1.324020785168134e-07, |
| "loss": 3.1362, |
| "num_tokens": 520970038.0, |
| "step": 286 |
| }, |
| { |
| "epoch": 2.9895833333333335, |
| "grad_norm": 0.559122622013092, |
| "learning_rate": 5.884897585557436e-08, |
| "loss": 4.7854, |
| "num_tokens": 522804448.0, |
| "step": 287 |
| }, |
| { |
| "epoch": 3.0, |
| "grad_norm": 0.5243895649909973, |
| "learning_rate": 1.4712785129011863e-08, |
| "loss": 4.0231, |
| "num_tokens": 524621154.0, |
| "step": 288 |
| }, |
| { |
| "epoch": 3.0, |
| "eval_loss": 0.27166327834129333, |
| "eval_num_tokens": 524621154.0, |
| "eval_runtime": 29.6038, |
| "eval_samples_per_second": 74.788, |
| "eval_steps_per_second": 2.365, |
| "step": 288 |
| } |
| ], |
| "logging_steps": 1, |
| "max_steps": 288, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 3, |
| "save_steps": 500, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 1.5772065954768355e+19, |
| "train_batch_size": 28, |
| "trial_name": null, |
| "trial_params": null |
| } |