{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 441,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.034013605442176874,
      "grad_norm": 2.7028610706329346,
      "learning_rate": 4.998414279387743e-05,
      "loss": 6.0793,
      "num_input_tokens_seen": 3168,
      "step": 5
    },
    {
      "epoch": 0.06802721088435375,
      "grad_norm": 2.967193603515625,
      "learning_rate": 4.9936591291588586e-05,
      "loss": 5.4017,
      "num_input_tokens_seen": 6544,
      "step": 10
    },
    {
      "epoch": 0.10204081632653061,
      "grad_norm": 1.1446479558944702,
      "learning_rate": 4.985740581585134e-05,
      "loss": 4.3102,
      "num_input_tokens_seen": 10464,
      "step": 15
    },
    {
      "epoch": 0.1360544217687075,
      "grad_norm": 1.7880395650863647,
      "learning_rate": 4.9746686819498546e-05,
      "loss": 4.232,
      "num_input_tokens_seen": 13296,
      "step": 20
    },
    {
      "epoch": 0.17006802721088435,
      "grad_norm": 2.237711191177368,
      "learning_rate": 4.960457475804594e-05,
      "loss": 3.9065,
      "num_input_tokens_seen": 16320,
      "step": 25
    },
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 1.7026973962783813,
      "learning_rate": 4.94312499115136e-05,
      "loss": 3.8348,
      "num_input_tokens_seen": 19504,
      "step": 30
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 1.4949020147323608,
      "learning_rate": 4.922693215572695e-05,
      "loss": 3.5761,
      "num_input_tokens_seen": 22704,
      "step": 35
    },
    {
      "epoch": 0.272108843537415,
      "grad_norm": 2.234846830368042,
      "learning_rate": 4.899188068338743e-05,
      "loss": 3.7531,
      "num_input_tokens_seen": 25728,
      "step": 40
    },
    {
      "epoch": 0.30612244897959184,
      "grad_norm": 1.7678470611572266,
      "learning_rate": 4.8726393675266716e-05,
      "loss": 3.5748,
      "num_input_tokens_seen": 29024,
      "step": 45
    },
    {
      "epoch": 0.3401360544217687,
      "grad_norm": 1.8059829473495483,
      "learning_rate": 4.84308079219417e-05,
      "loss": 3.772,
      "num_input_tokens_seen": 32464,
      "step": 50
    },
    {
      "epoch": 0.3741496598639456,
      "grad_norm": 2.300635576248169,
      "learning_rate": 4.810549839654973e-05,
      "loss": 3.807,
      "num_input_tokens_seen": 35424,
      "step": 55
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 2.1998870372772217,
      "learning_rate": 4.7750877779106666e-05,
      "loss": 3.5115,
      "num_input_tokens_seen": 38208,
      "step": 60
    },
    {
      "epoch": 0.4421768707482993,
      "grad_norm": 1.6165343523025513,
      "learning_rate": 4.736739593299058e-05,
      "loss": 3.7613,
      "num_input_tokens_seen": 41280,
      "step": 65
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 2.3959059715270996,
      "learning_rate": 4.6955539334255716e-05,
      "loss": 3.6662,
      "num_input_tokens_seen": 44144,
      "step": 70
    },
    {
      "epoch": 0.5102040816326531,
      "grad_norm": 1.5641539096832275,
      "learning_rate": 4.651583045450041e-05,
      "loss": 3.627,
      "num_input_tokens_seen": 47120,
      "step": 75
    },
    {
      "epoch": 0.54421768707483,
      "grad_norm": 1.9732143878936768,
      "learning_rate": 4.604882709807187e-05,
      "loss": 3.6502,
      "num_input_tokens_seen": 50272,
      "step": 80
    },
    {
      "epoch": 0.5782312925170068,
      "grad_norm": 2.6501049995422363,
      "learning_rate": 4.5555121694448735e-05,
      "loss": 3.7163,
      "num_input_tokens_seen": 53056,
      "step": 85
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": 1.9555385112762451,
      "learning_rate": 4.503534054669892e-05,
      "loss": 3.6891,
      "num_input_tokens_seen": 55968,
      "step": 90
    },
    {
      "epoch": 0.6462585034013606,
      "grad_norm": 1.728500485420227,
      "learning_rate": 4.44901430369663e-05,
      "loss": 3.6063,
      "num_input_tokens_seen": 58944,
      "step": 95
    },
    {
      "epoch": 0.6802721088435374,
      "grad_norm": 1.6137477159500122,
      "learning_rate": 4.392022078999405e-05,
      "loss": 3.5177,
      "num_input_tokens_seen": 61872,
      "step": 100
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 1.7164677381515503,
      "learning_rate": 4.332629679574566e-05,
      "loss": 3.376,
      "num_input_tokens_seen": 64704,
      "step": 105
    },
    {
      "epoch": 0.7482993197278912,
      "grad_norm": 1.568602442741394,
      "learning_rate": 4.270912449223699e-05,
      "loss": 3.4565,
      "num_input_tokens_seen": 67776,
      "step": 110
    },
    {
      "epoch": 0.782312925170068,
      "grad_norm": 2.5001213550567627,
      "learning_rate": 4.206948680974242e-05,
      "loss": 3.6683,
      "num_input_tokens_seen": 70784,
      "step": 115
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 2.0534141063690186,
      "learning_rate": 4.140819517758795e-05,
      "loss": 3.5794,
      "num_input_tokens_seen": 74336,
      "step": 120
    },
    {
      "epoch": 0.8503401360544217,
      "grad_norm": 2.1421139240264893,
      "learning_rate": 4.072608849479106e-05,
      "loss": 3.6307,
      "num_input_tokens_seen": 77232,
      "step": 125
    },
    {
      "epoch": 0.8843537414965986,
      "grad_norm": 1.939579963684082,
      "learning_rate": 4.002403206585307e-05,
      "loss": 3.5575,
      "num_input_tokens_seen": 80352,
      "step": 130
    },
    {
      "epoch": 0.9183673469387755,
      "grad_norm": 2.4437503814697266,
      "learning_rate": 3.9302916503054246e-05,
      "loss": 3.5487,
      "num_input_tokens_seen": 84016,
      "step": 135
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 2.416694164276123,
      "learning_rate": 3.856365659664399e-05,
      "loss": 3.6761,
      "num_input_tokens_seen": 87120,
      "step": 140
    },
    {
      "epoch": 0.9863945578231292,
      "grad_norm": 2.3587541580200195,
      "learning_rate": 3.780719015435943e-05,
      "loss": 3.1846,
      "num_input_tokens_seen": 90544,
      "step": 145
    },
    {
      "epoch": 1.0204081632653061,
      "grad_norm": 2.34908390045166,
      "learning_rate": 3.703447681174458e-05,
      "loss": 3.2783,
      "num_input_tokens_seen": 94712,
      "step": 150
    },
    {
      "epoch": 1.054421768707483,
      "grad_norm": 2.1918578147888184,
      "learning_rate": 3.624649681477923e-05,
      "loss": 3.494,
      "num_input_tokens_seen": 97912,
      "step": 155
    },
    {
      "epoch": 1.08843537414966,
      "grad_norm": 2.6867949962615967,
      "learning_rate": 3.544424977636198e-05,
      "loss": 3.2551,
      "num_input_tokens_seen": 100696,
      "step": 160
    },
    {
      "epoch": 1.1224489795918366,
      "grad_norm": 3.030991315841675,
      "learning_rate": 3.4628753408224765e-05,
      "loss": 3.4821,
      "num_input_tokens_seen": 104552,
      "step": 165
    },
    {
      "epoch": 1.1564625850340136,
      "grad_norm": 2.9311370849609375,
      "learning_rate": 3.3801042229887756e-05,
      "loss": 3.4083,
      "num_input_tokens_seen": 107800,
      "step": 170
    },
    {
      "epoch": 1.1904761904761905,
      "grad_norm": 2.682126760482788,
      "learning_rate": 3.2962166256292113e-05,
      "loss": 3.4593,
      "num_input_tokens_seen": 110840,
      "step": 175
    },
    {
      "epoch": 1.2244897959183674,
      "grad_norm": 2.8780510425567627,
      "learning_rate": 3.211318966577581e-05,
      "loss": 3.5256,
      "num_input_tokens_seen": 113640,
      "step": 180
    },
    {
      "epoch": 1.2585034013605443,
      "grad_norm": 2.4468441009521484,
      "learning_rate": 3.1255189450081977e-05,
      "loss": 2.9337,
      "num_input_tokens_seen": 117752,
      "step": 185
    },
    {
      "epoch": 1.2925170068027212,
      "grad_norm": 2.369248628616333,
      "learning_rate": 3.0389254048112493e-05,
      "loss": 3.2217,
      "num_input_tokens_seen": 121512,
      "step": 190
    },
    {
      "epoch": 1.3265306122448979,
      "grad_norm": 1.5555424690246582,
      "learning_rate": 2.9516481965159975e-05,
      "loss": 3.4546,
      "num_input_tokens_seen": 124840,
      "step": 195
    },
    {
      "epoch": 1.3605442176870748,
      "grad_norm": 2.8310885429382324,
      "learning_rate": 2.863798037936983e-05,
      "loss": 3.559,
      "num_input_tokens_seen": 127960,
      "step": 200
    },
    {
      "epoch": 1.3945578231292517,
      "grad_norm": 2.4837112426757812,
      "learning_rate": 2.775486373720003e-05,
      "loss": 3.2782,
      "num_input_tokens_seen": 130968,
      "step": 205
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 2.7569165229797363,
      "learning_rate": 2.686825233966061e-05,
      "loss": 3.4279,
      "num_input_tokens_seen": 133736,
      "step": 210
    },
    {
      "epoch": 1.4625850340136055,
      "grad_norm": 2.901097297668457,
      "learning_rate": 2.597927092112607e-05,
      "loss": 3.3297,
      "num_input_tokens_seen": 136552,
      "step": 215
    },
    {
      "epoch": 1.4965986394557822,
      "grad_norm": 2.221858263015747,
      "learning_rate": 2.5089047222523838e-05,
      "loss": 3.2559,
      "num_input_tokens_seen": 139832,
      "step": 220
    },
    {
      "epoch": 1.5306122448979593,
      "grad_norm": 2.864755153656006,
      "learning_rate": 2.419871056070862e-05,
      "loss": 3.1332,
      "num_input_tokens_seen": 142840,
      "step": 225
    },
    {
      "epoch": 1.564625850340136,
      "grad_norm": 2.6603639125823975,
      "learning_rate": 2.3309390395837633e-05,
      "loss": 3.4912,
      "num_input_tokens_seen": 145704,
      "step": 230
    },
    {
      "epoch": 1.598639455782313,
      "grad_norm": 1.888742446899414,
      "learning_rate": 2.2422214898563916e-05,
      "loss": 3.4511,
      "num_input_tokens_seen": 148632,
      "step": 235
    },
    {
      "epoch": 1.6326530612244898,
      "grad_norm": 2.04915714263916,
      "learning_rate": 2.1538309518865646e-05,
      "loss": 3.4876,
      "num_input_tokens_seen": 151608,
      "step": 240
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 2.1959643363952637,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 3.474,
      "num_input_tokens_seen": 154936,
      "step": 245
    },
    {
      "epoch": 1.7006802721088436,
      "grad_norm": 2.2664616107940674,
      "learning_rate": 1.9784788747679982e-05,
      "loss": 3.2019,
      "num_input_tokens_seen": 157912,
      "step": 250
    },
    {
      "epoch": 1.7346938775510203,
      "grad_norm": 2.486283540725708,
      "learning_rate": 1.8917397831417286e-05,
      "loss": 3.2353,
      "num_input_tokens_seen": 160536,
      "step": 255
    },
    {
      "epoch": 1.7687074829931972,
      "grad_norm": 2.9121577739715576,
      "learning_rate": 1.80577231612625e-05,
      "loss": 3.3723,
      "num_input_tokens_seen": 163976,
      "step": 260
    },
    {
      "epoch": 1.8027210884353742,
      "grad_norm": 3.1636898517608643,
      "learning_rate": 1.720685530029105e-05,
      "loss": 3.3386,
      "num_input_tokens_seen": 167144,
      "step": 265
    },
    {
      "epoch": 1.836734693877551,
      "grad_norm": 3.5455474853515625,
      "learning_rate": 1.6365873639467315e-05,
      "loss": 3.4695,
      "num_input_tokens_seen": 169752,
      "step": 270
    },
    {
      "epoch": 1.870748299319728,
      "grad_norm": 2.9391326904296875,
      "learning_rate": 1.553584502835456e-05,
      "loss": 3.0469,
      "num_input_tokens_seen": 173416,
      "step": 275
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 2.9568731784820557,
      "learning_rate": 1.4717822421734718e-05,
      "loss": 3.3184,
      "num_input_tokens_seen": 176392,
      "step": 280
    },
    {
      "epoch": 1.9387755102040818,
      "grad_norm": 3.0478389263153076,
      "learning_rate": 1.3912843543854664e-05,
      "loss": 3.3918,
      "num_input_tokens_seen": 179272,
      "step": 285
    },
    {
      "epoch": 1.9727891156462585,
      "grad_norm": 2.8810529708862305,
      "learning_rate": 1.3121929571993685e-05,
      "loss": 3.5476,
      "num_input_tokens_seen": 182616,
      "step": 290
    },
    {
      "epoch": 2.006802721088435,
      "grad_norm": 2.8993630409240723,
      "learning_rate": 1.2346083841021928e-05,
      "loss": 3.3684,
      "num_input_tokens_seen": 185424,
      "step": 295
    },
    {
      "epoch": 2.0408163265306123,
      "grad_norm": 1.4063067436218262,
      "learning_rate": 1.158629057059343e-05,
      "loss": 3.2909,
      "num_input_tokens_seen": 189040,
      "step": 300
    },
    {
      "epoch": 2.074829931972789,
      "grad_norm": 4.071662425994873,
      "learning_rate": 1.084351361658816e-05,
      "loss": 3.2623,
      "num_input_tokens_seen": 191856,
      "step": 305
    },
    {
      "epoch": 2.108843537414966,
      "grad_norm": 3.5538320541381836,
      "learning_rate": 1.0118695248387141e-05,
      "loss": 3.3487,
      "num_input_tokens_seen": 194720,
      "step": 310
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 2.685102939605713,
      "learning_rate": 9.412754953531663e-06,
      "loss": 3.2409,
      "num_input_tokens_seen": 197808,
      "step": 315
    },
    {
      "epoch": 2.17687074829932,
      "grad_norm": 3.332947015762329,
      "learning_rate": 8.726588271282978e-06,
      "loss": 3.2031,
      "num_input_tokens_seen": 201040,
      "step": 320
    },
    {
      "epoch": 2.2108843537414966,
      "grad_norm": 3.082350492477417,
      "learning_rate": 8.061065656562269e-06,
      "loss": 2.9128,
      "num_input_tokens_seen": 204944,
      "step": 325
    },
    {
      "epoch": 2.2448979591836733,
      "grad_norm": 4.072040557861328,
      "learning_rate": 7.417031375712008e-06,
      "loss": 3.4445,
      "num_input_tokens_seen": 207712,
      "step": 330
    },
    {
      "epoch": 2.2789115646258504,
      "grad_norm": 3.686593770980835,
      "learning_rate": 6.795302435479523e-06,
      "loss": 3.0001,
      "num_input_tokens_seen": 210576,
      "step": 335
    },
    {
      "epoch": 2.312925170068027,
      "grad_norm": 2.2774229049682617,
      "learning_rate": 6.196667546581405e-06,
      "loss": 3.3204,
      "num_input_tokens_seen": 213712,
      "step": 340
    },
    {
      "epoch": 2.3469387755102042,
      "grad_norm": 3.1405270099639893,
      "learning_rate": 5.621886123163708e-06,
      "loss": 3.1907,
      "num_input_tokens_seen": 216464,
      "step": 345
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 2.792036771774292,
      "learning_rate": 5.071687319426946e-06,
      "loss": 3.298,
      "num_input_tokens_seen": 219616,
      "step": 350
    },
    {
      "epoch": 2.4149659863945576,
      "grad_norm": 3.187742233276367,
      "learning_rate": 4.5467691046382435e-06,
      "loss": 3.2584,
      "num_input_tokens_seen": 222848,
      "step": 355
    },
    {
      "epoch": 2.4489795918367347,
      "grad_norm": 3.170606851577759,
      "learning_rate": 4.047797377703985e-06,
      "loss": 3.3844,
      "num_input_tokens_seen": 225568,
      "step": 360
    },
    {
      "epoch": 2.4829931972789114,
      "grad_norm": 3.8285155296325684,
      "learning_rate": 3.5754051224260176e-06,
      "loss": 3.3962,
      "num_input_tokens_seen": 228832,
      "step": 365
    },
    {
      "epoch": 2.5170068027210886,
      "grad_norm": 3.4845287799835205,
      "learning_rate": 3.130191604513352e-06,
      "loss": 3.1933,
      "num_input_tokens_seen": 231968,
      "step": 370
    },
    {
      "epoch": 2.5510204081632653,
      "grad_norm": 3.385293960571289,
      "learning_rate": 2.7127216113677635e-06,
      "loss": 3.1452,
      "num_input_tokens_seen": 235840,
      "step": 375
    },
    {
      "epoch": 2.5850340136054424,
      "grad_norm": 4.231153964996338,
      "learning_rate": 2.323524735607749e-06,
      "loss": 3.3513,
      "num_input_tokens_seen": 238768,
      "step": 380
    },
    {
      "epoch": 2.619047619047619,
      "grad_norm": 2.916943311691284,
      "learning_rate": 1.9630947032398067e-06,
      "loss": 2.9965,
      "num_input_tokens_seen": 242240,
      "step": 385
    },
    {
      "epoch": 2.6530612244897958,
      "grad_norm": 2.958503484725952,
      "learning_rate": 1.6318887473292243e-06,
      "loss": 3.1061,
      "num_input_tokens_seen": 245248,
      "step": 390
    },
    {
      "epoch": 2.687074829931973,
      "grad_norm": 2.5356791019439697,
      "learning_rate": 1.3303270279649477e-06,
      "loss": 3.0728,
      "num_input_tokens_seen": 248416,
      "step": 395
    },
    {
      "epoch": 2.7210884353741496,
      "grad_norm": 3.929884672164917,
      "learning_rate": 1.0587920992543853e-06,
      "loss": 3.1985,
      "num_input_tokens_seen": 251712,
      "step": 400
    },
    {
      "epoch": 2.7551020408163263,
      "grad_norm": 3.698723077774048,
      "learning_rate": 8.176284240242638e-07,
      "loss": 3.3142,
      "num_input_tokens_seen": 254496,
      "step": 405
    },
    {
      "epoch": 2.7891156462585034,
      "grad_norm": 3.604297637939453,
      "learning_rate": 6.071419368431779e-07,
      "loss": 3.4238,
      "num_input_tokens_seen": 257760,
      "step": 410
    },
    {
      "epoch": 2.8231292517006805,
      "grad_norm": 4.10955810546875,
      "learning_rate": 4.2759965592018283e-07,
      "loss": 3.3226,
      "num_input_tokens_seen": 260784,
      "step": 415
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 3.1334779262542725,
      "learning_rate": 2.7922934437178695e-07,
      "loss": 3.2627,
      "num_input_tokens_seen": 263792,
      "step": 420
    },
    {
      "epoch": 2.891156462585034,
      "grad_norm": 3.3782756328582764,
      "learning_rate": 1.6221922128700217e-07,
      "loss": 3.2338,
      "num_input_tokens_seen": 266784,
      "step": 425
    },
    {
      "epoch": 2.925170068027211,
      "grad_norm": 2.9588587284088135,
      "learning_rate": 7.671772295704815e-08,
      "loss": 3.1991,
      "num_input_tokens_seen": 269936,
      "step": 430
    },
    {
      "epoch": 2.9591836734693877,
      "grad_norm": 3.1428213119506836,
      "learning_rate": 2.2833314572542895e-08,
      "loss": 3.3015,
      "num_input_tokens_seen": 272704,
      "step": 435
    },
    {
      "epoch": 2.9931972789115644,
      "grad_norm": 4.047586917877197,
      "learning_rate": 6.343526271379574e-10,
      "loss": 3.4811,
      "num_input_tokens_seen": 276208,
      "step": 440
    },
    {
      "epoch": 3.0,
      "num_input_tokens_seen": 276856,
      "step": 441,
      "total_flos": 2191990201638912.0,
      "train_loss": 3.4709230188339477,
      "train_runtime": 2415.5729,
      "train_samples_per_second": 2.92,
      "train_steps_per_second": 0.183
    }
  ],
  "logging_steps": 5,
  "max_steps": 441,
  "num_input_tokens_seen": 276856,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2191990201638912.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}