| { |
| "best_global_step": 1140, |
| "best_metric": 1.006801962852478, |
| "best_model_checkpoint": "saves_multiple/lntuning/llama-3-8b-instruct/train_cb_789_1760637870/checkpoint-1140", |
| "epoch": 20.0, |
| "eval_steps": 57, |
| "global_step": 1140, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.08771929824561403, |
| "grad_norm": 8.9375, |
| "learning_rate": 1.7543859649122807e-06, |
| "loss": 1.1255, |
| "num_input_tokens_seen": 3136, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.17543859649122806, |
| "grad_norm": 8.3125, |
| "learning_rate": 3.9473684210526315e-06, |
| "loss": 0.9721, |
| "num_input_tokens_seen": 6112, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.2631578947368421, |
| "grad_norm": 7.34375, |
| "learning_rate": 6.140350877192982e-06, |
| "loss": 0.8981, |
| "num_input_tokens_seen": 10112, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.3508771929824561, |
| "grad_norm": 8.75, |
| "learning_rate": 8.333333333333334e-06, |
| "loss": 1.3077, |
| "num_input_tokens_seen": 13280, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.43859649122807015, |
| "grad_norm": 9.1875, |
| "learning_rate": 1.0526315789473684e-05, |
| "loss": 1.1653, |
| "num_input_tokens_seen": 16288, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.5263157894736842, |
| "grad_norm": 7.53125, |
| "learning_rate": 1.2719298245614037e-05, |
| "loss": 1.0951, |
| "num_input_tokens_seen": 19104, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.6140350877192983, |
| "grad_norm": 6.09375, |
| "learning_rate": 1.4912280701754386e-05, |
| "loss": 1.186, |
| "num_input_tokens_seen": 22144, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.7017543859649122, |
| "grad_norm": 8.3125, |
| "learning_rate": 1.7105263157894737e-05, |
| "loss": 1.0012, |
| "num_input_tokens_seen": 25792, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.7894736842105263, |
| "grad_norm": 9.1875, |
| "learning_rate": 1.929824561403509e-05, |
| "loss": 1.2075, |
| "num_input_tokens_seen": 28576, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.8771929824561403, |
| "grad_norm": 11.5, |
| "learning_rate": 2.149122807017544e-05, |
| "loss": 1.3257, |
| "num_input_tokens_seen": 31424, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.9649122807017544, |
| "grad_norm": 8.125, |
| "learning_rate": 2.368421052631579e-05, |
| "loss": 0.998, |
| "num_input_tokens_seen": 34720, |
| "step": 55 |
| }, |
| { |
| "epoch": 1.0, |
| "eval_loss": 1.153038740158081, |
| "eval_runtime": 0.6276, |
| "eval_samples_per_second": 39.837, |
| "eval_steps_per_second": 11.154, |
| "num_input_tokens_seen": 35448, |
| "step": 57 |
| }, |
| { |
| "epoch": 1.0526315789473684, |
| "grad_norm": 6.5, |
| "learning_rate": 2.5877192982456143e-05, |
| "loss": 0.8236, |
| "num_input_tokens_seen": 37688, |
| "step": 60 |
| }, |
| { |
| "epoch": 1.1403508771929824, |
| "grad_norm": 7.34375, |
| "learning_rate": 2.8070175438596492e-05, |
| "loss": 1.1031, |
| "num_input_tokens_seen": 40792, |
| "step": 65 |
| }, |
| { |
| "epoch": 1.2280701754385965, |
| "grad_norm": 7.84375, |
| "learning_rate": 3.0263157894736844e-05, |
| "loss": 1.1953, |
| "num_input_tokens_seen": 43832, |
| "step": 70 |
| }, |
| { |
| "epoch": 1.3157894736842106, |
| "grad_norm": 7.75, |
| "learning_rate": 3.24561403508772e-05, |
| "loss": 1.1033, |
| "num_input_tokens_seen": 46648, |
| "step": 75 |
| }, |
| { |
| "epoch": 1.4035087719298245, |
| "grad_norm": 9.1875, |
| "learning_rate": 3.4649122807017546e-05, |
| "loss": 1.0817, |
| "num_input_tokens_seen": 49848, |
| "step": 80 |
| }, |
| { |
| "epoch": 1.4912280701754386, |
| "grad_norm": 8.375, |
| "learning_rate": 3.6842105263157895e-05, |
| "loss": 1.2344, |
| "num_input_tokens_seen": 52504, |
| "step": 85 |
| }, |
| { |
| "epoch": 1.5789473684210527, |
| "grad_norm": 9.0625, |
| "learning_rate": 3.9035087719298244e-05, |
| "loss": 0.9732, |
| "num_input_tokens_seen": 55448, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.6666666666666665, |
| "grad_norm": 8.1875, |
| "learning_rate": 4.12280701754386e-05, |
| "loss": 1.0791, |
| "num_input_tokens_seen": 58776, |
| "step": 95 |
| }, |
| { |
| "epoch": 1.7543859649122808, |
| "grad_norm": 7.75, |
| "learning_rate": 4.342105263157895e-05, |
| "loss": 1.0441, |
| "num_input_tokens_seen": 61624, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.8421052631578947, |
| "grad_norm": 6.65625, |
| "learning_rate": 4.56140350877193e-05, |
| "loss": 1.0914, |
| "num_input_tokens_seen": 65336, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.9298245614035088, |
| "grad_norm": 8.375, |
| "learning_rate": 4.780701754385965e-05, |
| "loss": 1.1776, |
| "num_input_tokens_seen": 68280, |
| "step": 110 |
| }, |
| { |
| "epoch": 2.0, |
| "eval_loss": 1.1269092559814453, |
| "eval_runtime": 0.6349, |
| "eval_samples_per_second": 39.374, |
| "eval_steps_per_second": 11.025, |
| "num_input_tokens_seen": 70496, |
| "step": 114 |
| }, |
| { |
| "epoch": 2.017543859649123, |
| "grad_norm": 9.0, |
| "learning_rate": 5e-05, |
| "loss": 1.1226, |
| "num_input_tokens_seen": 71008, |
| "step": 115 |
| }, |
| { |
| "epoch": 2.1052631578947367, |
| "grad_norm": 10.8125, |
| "learning_rate": 4.999707014206475e-05, |
| "loss": 1.0517, |
| "num_input_tokens_seen": 74176, |
| "step": 120 |
| }, |
| { |
| "epoch": 2.192982456140351, |
| "grad_norm": 8.5, |
| "learning_rate": 4.9988281254984414e-05, |
| "loss": 1.0498, |
| "num_input_tokens_seen": 77472, |
| "step": 125 |
| }, |
| { |
| "epoch": 2.280701754385965, |
| "grad_norm": 8.0, |
| "learning_rate": 4.997363539877422e-05, |
| "loss": 1.0882, |
| "num_input_tokens_seen": 80192, |
| "step": 130 |
| }, |
| { |
| "epoch": 2.3684210526315788, |
| "grad_norm": 11.0, |
| "learning_rate": 4.9953136006256415e-05, |
| "loss": 1.1703, |
| "num_input_tokens_seen": 83424, |
| "step": 135 |
| }, |
| { |
| "epoch": 2.456140350877193, |
| "grad_norm": 7.9375, |
| "learning_rate": 4.9926787882255636e-05, |
| "loss": 1.0101, |
| "num_input_tokens_seen": 86080, |
| "step": 140 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "grad_norm": 8.0625, |
| "learning_rate": 4.9894597202472696e-05, |
| "loss": 1.12, |
| "num_input_tokens_seen": 89600, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.6315789473684212, |
| "grad_norm": 7.96875, |
| "learning_rate": 4.985657151203706e-05, |
| "loss": 1.0743, |
| "num_input_tokens_seen": 92704, |
| "step": 150 |
| }, |
| { |
| "epoch": 2.719298245614035, |
| "grad_norm": 7.8125, |
| "learning_rate": 4.9812719723738435e-05, |
| "loss": 0.9319, |
| "num_input_tokens_seen": 96160, |
| "step": 155 |
| }, |
| { |
| "epoch": 2.807017543859649, |
| "grad_norm": 8.375, |
| "learning_rate": 4.976305211593758e-05, |
| "loss": 1.0211, |
| "num_input_tokens_seen": 99520, |
| "step": 160 |
| }, |
| { |
| "epoch": 2.8947368421052633, |
| "grad_norm": 7.375, |
| "learning_rate": 4.970758033015731e-05, |
| "loss": 1.031, |
| "num_input_tokens_seen": 102528, |
| "step": 165 |
| }, |
| { |
| "epoch": 2.982456140350877, |
| "grad_norm": 8.5, |
| "learning_rate": 4.9646317368353743e-05, |
| "loss": 1.1544, |
| "num_input_tokens_seen": 106304, |
| "step": 170 |
| }, |
| { |
| "epoch": 3.0, |
| "eval_loss": 1.118430256843567, |
| "eval_runtime": 0.6455, |
| "eval_samples_per_second": 38.731, |
| "eval_steps_per_second": 10.845, |
| "num_input_tokens_seen": 106416, |
| "step": 171 |
| }, |
| { |
| "epoch": 3.0701754385964914, |
| "grad_norm": 7.9375, |
| "learning_rate": 4.957927758986888e-05, |
| "loss": 1.0566, |
| "num_input_tokens_seen": 108880, |
| "step": 175 |
| }, |
| { |
| "epoch": 3.1578947368421053, |
| "grad_norm": 9.875, |
| "learning_rate": 4.9506476708064865e-05, |
| "loss": 1.2911, |
| "num_input_tokens_seen": 111984, |
| "step": 180 |
| }, |
| { |
| "epoch": 3.245614035087719, |
| "grad_norm": 8.125, |
| "learning_rate": 4.9427931786641e-05, |
| "loss": 0.903, |
| "num_input_tokens_seen": 115248, |
| "step": 185 |
| }, |
| { |
| "epoch": 3.3333333333333335, |
| "grad_norm": 10.125, |
| "learning_rate": 4.93436612356342e-05, |
| "loss": 0.9444, |
| "num_input_tokens_seen": 118384, |
| "step": 190 |
| }, |
| { |
| "epoch": 3.4210526315789473, |
| "grad_norm": 8.8125, |
| "learning_rate": 4.925368480710385e-05, |
| "loss": 1.0751, |
| "num_input_tokens_seen": 122224, |
| "step": 195 |
| }, |
| { |
| "epoch": 3.5087719298245617, |
| "grad_norm": 6.6875, |
| "learning_rate": 4.915802359050222e-05, |
| "loss": 0.8527, |
| "num_input_tokens_seen": 125584, |
| "step": 200 |
| }, |
| { |
| "epoch": 3.5964912280701755, |
| "grad_norm": 7.40625, |
| "learning_rate": 4.905670000773126e-05, |
| "loss": 1.1012, |
| "num_input_tokens_seen": 128816, |
| "step": 205 |
| }, |
| { |
| "epoch": 3.6842105263157894, |
| "grad_norm": 9.875, |
| "learning_rate": 4.894973780788722e-05, |
| "loss": 1.2059, |
| "num_input_tokens_seen": 131952, |
| "step": 210 |
| }, |
| { |
| "epoch": 3.7719298245614032, |
| "grad_norm": 6.9375, |
| "learning_rate": 4.88371620616941e-05, |
| "loss": 0.9804, |
| "num_input_tokens_seen": 135248, |
| "step": 215 |
| }, |
| { |
| "epoch": 3.8596491228070176, |
| "grad_norm": 5.25, |
| "learning_rate": 4.871899915562736e-05, |
| "loss": 0.8994, |
| "num_input_tokens_seen": 138384, |
| "step": 220 |
| }, |
| { |
| "epoch": 3.9473684210526314, |
| "grad_norm": 10.625, |
| "learning_rate": 4.8595276785729236e-05, |
| "loss": 1.2161, |
| "num_input_tokens_seen": 141264, |
| "step": 225 |
| }, |
| { |
| "epoch": 4.0, |
| "eval_loss": 1.0681884288787842, |
| "eval_runtime": 0.6373, |
| "eval_samples_per_second": 39.225, |
| "eval_steps_per_second": 10.983, |
| "num_input_tokens_seen": 142480, |
| "step": 228 |
| }, |
| { |
| "epoch": 4.035087719298246, |
| "grad_norm": 10.4375, |
| "learning_rate": 4.846602395111711e-05, |
| "loss": 1.1207, |
| "num_input_tokens_seen": 143664, |
| "step": 230 |
| }, |
| { |
| "epoch": 4.12280701754386, |
| "grad_norm": 5.96875, |
| "learning_rate": 4.833127094718643e-05, |
| "loss": 0.9329, |
| "num_input_tokens_seen": 146800, |
| "step": 235 |
| }, |
| { |
| "epoch": 4.2105263157894735, |
| "grad_norm": 7.78125, |
| "learning_rate": 4.819104935850983e-05, |
| "loss": 0.9884, |
| "num_input_tokens_seen": 149456, |
| "step": 240 |
| }, |
| { |
| "epoch": 4.298245614035087, |
| "grad_norm": 7.25, |
| "learning_rate": 4.804539205143405e-05, |
| "loss": 1.0246, |
| "num_input_tokens_seen": 152880, |
| "step": 245 |
| }, |
| { |
| "epoch": 4.385964912280702, |
| "grad_norm": 7.53125, |
| "learning_rate": 4.789433316637644e-05, |
| "loss": 1.0274, |
| "num_input_tokens_seen": 156720, |
| "step": 250 |
| }, |
| { |
| "epoch": 4.473684210526316, |
| "grad_norm": 8.75, |
| "learning_rate": 4.7737908109822854e-05, |
| "loss": 1.1004, |
| "num_input_tokens_seen": 159600, |
| "step": 255 |
| }, |
| { |
| "epoch": 4.56140350877193, |
| "grad_norm": 7.03125, |
| "learning_rate": 4.757615354602874e-05, |
| "loss": 1.1067, |
| "num_input_tokens_seen": 162224, |
| "step": 260 |
| }, |
| { |
| "epoch": 4.649122807017544, |
| "grad_norm": 8.4375, |
| "learning_rate": 4.7409107388425504e-05, |
| "loss": 0.889, |
| "num_input_tokens_seen": 165328, |
| "step": 265 |
| }, |
| { |
| "epoch": 4.7368421052631575, |
| "grad_norm": 10.125, |
| "learning_rate": 4.723680879073396e-05, |
| "loss": 1.0886, |
| "num_input_tokens_seen": 168592, |
| "step": 270 |
| }, |
| { |
| "epoch": 4.824561403508772, |
| "grad_norm": 7.84375, |
| "learning_rate": 4.70592981377872e-05, |
| "loss": 0.9943, |
| "num_input_tokens_seen": 171696, |
| "step": 275 |
| }, |
| { |
| "epoch": 4.912280701754386, |
| "grad_norm": 8.75, |
| "learning_rate": 4.6876617036064844e-05, |
| "loss": 1.0702, |
| "num_input_tokens_seen": 174672, |
| "step": 280 |
| }, |
| { |
| "epoch": 5.0, |
| "grad_norm": 4.90625, |
| "learning_rate": 4.668880830394093e-05, |
| "loss": 0.8968, |
| "num_input_tokens_seen": 177224, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.0, |
| "eval_loss": 1.06232750415802, |
| "eval_runtime": 0.6382, |
| "eval_samples_per_second": 39.174, |
| "eval_steps_per_second": 10.969, |
| "num_input_tokens_seen": 177224, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "grad_norm": 8.25, |
| "learning_rate": 4.649591596164778e-05, |
| "loss": 1.0547, |
| "num_input_tokens_seen": 180840, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.175438596491228, |
| "grad_norm": 8.125, |
| "learning_rate": 4.629798522095818e-05, |
| "loss": 0.9693, |
| "num_input_tokens_seen": 183976, |
| "step": 295 |
| }, |
| { |
| "epoch": 5.2631578947368425, |
| "grad_norm": 7.53125, |
| "learning_rate": 4.6095062474588225e-05, |
| "loss": 0.9876, |
| "num_input_tokens_seen": 187016, |
| "step": 300 |
| }, |
| { |
| "epoch": 5.350877192982456, |
| "grad_norm": 9.1875, |
| "learning_rate": 4.588719528532342e-05, |
| "loss": 1.0643, |
| "num_input_tokens_seen": 189480, |
| "step": 305 |
| }, |
| { |
| "epoch": 5.43859649122807, |
| "grad_norm": 6.4375, |
| "learning_rate": 4.5674432374870455e-05, |
| "loss": 1.0304, |
| "num_input_tokens_seen": 192712, |
| "step": 310 |
| }, |
| { |
| "epoch": 5.526315789473684, |
| "grad_norm": 8.75, |
| "learning_rate": 4.545682361243748e-05, |
| "loss": 1.1636, |
| "num_input_tokens_seen": 195496, |
| "step": 315 |
| }, |
| { |
| "epoch": 5.614035087719298, |
| "grad_norm": 7.65625, |
| "learning_rate": 4.5234420003045236e-05, |
| "loss": 0.9103, |
| "num_input_tokens_seen": 199240, |
| "step": 320 |
| }, |
| { |
| "epoch": 5.701754385964913, |
| "grad_norm": 9.5, |
| "learning_rate": 4.5007273675572104e-05, |
| "loss": 1.0893, |
| "num_input_tokens_seen": 201832, |
| "step": 325 |
| }, |
| { |
| "epoch": 5.7894736842105265, |
| "grad_norm": 7.78125, |
| "learning_rate": 4.4775437870535685e-05, |
| "loss": 0.7587, |
| "num_input_tokens_seen": 204968, |
| "step": 330 |
| }, |
| { |
| "epoch": 5.87719298245614, |
| "grad_norm": 9.75, |
| "learning_rate": 4.4538966927613836e-05, |
| "loss": 1.0155, |
| "num_input_tokens_seen": 208360, |
| "step": 335 |
| }, |
| { |
| "epoch": 5.964912280701754, |
| "grad_norm": 8.5625, |
| "learning_rate": 4.4297916272908024e-05, |
| "loss": 1.0029, |
| "num_input_tokens_seen": 211080, |
| "step": 340 |
| }, |
| { |
| "epoch": 6.0, |
| "eval_loss": 1.0562862157821655, |
| "eval_runtime": 0.6381, |
| "eval_samples_per_second": 39.178, |
| "eval_steps_per_second": 10.97, |
| "num_input_tokens_seen": 212000, |
| "step": 342 |
| }, |
| { |
| "epoch": 6.052631578947368, |
| "grad_norm": 7.5, |
| "learning_rate": 4.405234240595214e-05, |
| "loss": 0.94, |
| "num_input_tokens_seen": 214176, |
| "step": 345 |
| }, |
| { |
| "epoch": 6.140350877192983, |
| "grad_norm": 10.25, |
| "learning_rate": 4.3802302886469606e-05, |
| "loss": 1.0575, |
| "num_input_tokens_seen": 216736, |
| "step": 350 |
| }, |
| { |
| "epoch": 6.228070175438597, |
| "grad_norm": 7.0, |
| "learning_rate": 4.3547856320882044e-05, |
| "loss": 0.977, |
| "num_input_tokens_seen": 220224, |
| "step": 355 |
| }, |
| { |
| "epoch": 6.315789473684211, |
| "grad_norm": 7.9375, |
| "learning_rate": 4.328906234857259e-05, |
| "loss": 0.9086, |
| "num_input_tokens_seen": 223136, |
| "step": 360 |
| }, |
| { |
| "epoch": 6.4035087719298245, |
| "grad_norm": 9.5, |
| "learning_rate": 4.302598162790712e-05, |
| "loss": 1.0231, |
| "num_input_tokens_seen": 226944, |
| "step": 365 |
| }, |
| { |
| "epoch": 6.491228070175438, |
| "grad_norm": 7.0, |
| "learning_rate": 4.27586758220166e-05, |
| "loss": 0.98, |
| "num_input_tokens_seen": 230368, |
| "step": 370 |
| }, |
| { |
| "epoch": 6.578947368421053, |
| "grad_norm": 9.25, |
| "learning_rate": 4.2487207584343955e-05, |
| "loss": 0.9402, |
| "num_input_tokens_seen": 233376, |
| "step": 375 |
| }, |
| { |
| "epoch": 6.666666666666667, |
| "grad_norm": 7.21875, |
| "learning_rate": 4.2211640543958796e-05, |
| "loss": 0.984, |
| "num_input_tokens_seen": 236384, |
| "step": 380 |
| }, |
| { |
| "epoch": 6.754385964912281, |
| "grad_norm": 9.875, |
| "learning_rate": 4.193203929064353e-05, |
| "loss": 1.1068, |
| "num_input_tokens_seen": 239584, |
| "step": 385 |
| }, |
| { |
| "epoch": 6.842105263157895, |
| "grad_norm": 8.0625, |
| "learning_rate": 4.164846935975421e-05, |
| "loss": 0.9963, |
| "num_input_tokens_seen": 242624, |
| "step": 390 |
| }, |
| { |
| "epoch": 6.9298245614035086, |
| "grad_norm": 6.375, |
| "learning_rate": 4.136099721685983e-05, |
| "loss": 0.8655, |
| "num_input_tokens_seen": 246304, |
| "step": 395 |
| }, |
| { |
| "epoch": 7.0, |
| "eval_loss": 1.0370022058486938, |
| "eval_runtime": 0.6392, |
| "eval_samples_per_second": 39.109, |
| "eval_steps_per_second": 10.951, |
| "num_input_tokens_seen": 248272, |
| "step": 399 |
| }, |
| { |
| "epoch": 7.017543859649122, |
| "grad_norm": 8.75, |
| "learning_rate": 4.1069690242163484e-05, |
| "loss": 1.024, |
| "num_input_tokens_seen": 248912, |
| "step": 400 |
| }, |
| { |
| "epoch": 7.105263157894737, |
| "grad_norm": 9.1875, |
| "learning_rate": 4.0774616714709316e-05, |
| "loss": 0.9506, |
| "num_input_tokens_seen": 252016, |
| "step": 405 |
| }, |
| { |
| "epoch": 7.192982456140351, |
| "grad_norm": 7.65625, |
| "learning_rate": 4.047584579637857e-05, |
| "loss": 0.8695, |
| "num_input_tokens_seen": 255856, |
| "step": 410 |
| }, |
| { |
| "epoch": 7.280701754385965, |
| "grad_norm": 10.8125, |
| "learning_rate": 4.0173447515678916e-05, |
| "loss": 1.2329, |
| "num_input_tokens_seen": 258800, |
| "step": 415 |
| }, |
| { |
| "epoch": 7.368421052631579, |
| "grad_norm": 7.59375, |
| "learning_rate": 3.986749275133057e-05, |
| "loss": 0.9025, |
| "num_input_tokens_seen": 261584, |
| "step": 420 |
| }, |
| { |
| "epoch": 7.456140350877193, |
| "grad_norm": 8.8125, |
| "learning_rate": 3.955805321565304e-05, |
| "loss": 0.8505, |
| "num_input_tokens_seen": 265168, |
| "step": 425 |
| }, |
| { |
| "epoch": 7.543859649122807, |
| "grad_norm": 5.75, |
| "learning_rate": 3.9245201437756654e-05, |
| "loss": 0.8478, |
| "num_input_tokens_seen": 268240, |
| "step": 430 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "grad_norm": 8.6875, |
| "learning_rate": 3.892901074654255e-05, |
| "loss": 0.9857, |
| "num_input_tokens_seen": 271728, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.719298245614035, |
| "grad_norm": 7.625, |
| "learning_rate": 3.860955525351516e-05, |
| "loss": 1.0858, |
| "num_input_tokens_seen": 274736, |
| "step": 440 |
| }, |
| { |
| "epoch": 7.807017543859649, |
| "grad_norm": 7.4375, |
| "learning_rate": 3.82869098354114e-05, |
| "loss": 1.0428, |
| "num_input_tokens_seen": 278096, |
| "step": 445 |
| }, |
| { |
| "epoch": 7.894736842105263, |
| "grad_norm": 8.9375, |
| "learning_rate": 3.796115011665034e-05, |
| "loss": 0.9743, |
| "num_input_tokens_seen": 280592, |
| "step": 450 |
| }, |
| { |
| "epoch": 7.982456140350877, |
| "grad_norm": 8.3125, |
| "learning_rate": 3.763235245160775e-05, |
| "loss": 0.9519, |
| "num_input_tokens_seen": 284144, |
| "step": 455 |
| }, |
| { |
| "epoch": 8.0, |
| "eval_loss": 1.0199482440948486, |
| "eval_runtime": 0.6367, |
| "eval_samples_per_second": 39.267, |
| "eval_steps_per_second": 10.995, |
| "num_input_tokens_seen": 284248, |
| "step": 456 |
| }, |
| { |
| "epoch": 8.070175438596491, |
| "grad_norm": 6.09375, |
| "learning_rate": 3.7300593906719464e-05, |
| "loss": 0.9602, |
| "num_input_tokens_seen": 287128, |
| "step": 460 |
| }, |
| { |
| "epoch": 8.157894736842104, |
| "grad_norm": 8.25, |
| "learning_rate": 3.69659522424179e-05, |
| "loss": 0.8555, |
| "num_input_tokens_seen": 290744, |
| "step": 465 |
| }, |
| { |
| "epoch": 8.24561403508772, |
| "grad_norm": 9.1875, |
| "learning_rate": 3.662850589490592e-05, |
| "loss": 0.9742, |
| "num_input_tokens_seen": 293816, |
| "step": 470 |
| }, |
| { |
| "epoch": 8.333333333333334, |
| "grad_norm": 8.9375, |
| "learning_rate": 3.628833395777224e-05, |
| "loss": 1.0043, |
| "num_input_tokens_seen": 297080, |
| "step": 475 |
| }, |
| { |
| "epoch": 8.421052631578947, |
| "grad_norm": 10.75, |
| "learning_rate": 3.59455161634528e-05, |
| "loss": 0.9994, |
| "num_input_tokens_seen": 300408, |
| "step": 480 |
| }, |
| { |
| "epoch": 8.508771929824562, |
| "grad_norm": 7.4375, |
| "learning_rate": 3.560013286454242e-05, |
| "loss": 1.0842, |
| "num_input_tokens_seen": 303384, |
| "step": 485 |
| }, |
| { |
| "epoch": 8.596491228070175, |
| "grad_norm": 10.5625, |
| "learning_rate": 3.5252265014961006e-05, |
| "loss": 0.9779, |
| "num_input_tokens_seen": 306232, |
| "step": 490 |
| }, |
| { |
| "epoch": 8.68421052631579, |
| "grad_norm": 5.46875, |
| "learning_rate": 3.490199415097892e-05, |
| "loss": 0.8759, |
| "num_input_tokens_seen": 309816, |
| "step": 495 |
| }, |
| { |
| "epoch": 8.771929824561404, |
| "grad_norm": 7.6875, |
| "learning_rate": 3.45494023721058e-05, |
| "loss": 0.9218, |
| "num_input_tokens_seen": 312408, |
| "step": 500 |
| }, |
| { |
| "epoch": 8.859649122807017, |
| "grad_norm": 12.125, |
| "learning_rate": 3.4194572321847336e-05, |
| "loss": 0.9384, |
| "num_input_tokens_seen": 315416, |
| "step": 505 |
| }, |
| { |
| "epoch": 8.947368421052632, |
| "grad_norm": 7.46875, |
| "learning_rate": 3.383758716833459e-05, |
| "loss": 1.0435, |
| "num_input_tokens_seen": 318392, |
| "step": 510 |
| }, |
| { |
| "epoch": 9.0, |
| "eval_loss": 1.0256277322769165, |
| "eval_runtime": 0.6382, |
| "eval_samples_per_second": 39.17, |
| "eval_steps_per_second": 10.968, |
| "num_input_tokens_seen": 319488, |
| "step": 513 |
| }, |
| { |
| "epoch": 9.035087719298245, |
| "grad_norm": 6.8125, |
| "learning_rate": 3.347853058483037e-05, |
| "loss": 0.9264, |
| "num_input_tokens_seen": 320832, |
| "step": 515 |
| }, |
| { |
| "epoch": 9.12280701754386, |
| "grad_norm": 6.78125, |
| "learning_rate": 3.311748673011709e-05, |
| "loss": 0.9468, |
| "num_input_tokens_seen": 323840, |
| "step": 520 |
| }, |
| { |
| "epoch": 9.210526315789474, |
| "grad_norm": 9.8125, |
| "learning_rate": 3.275454022877097e-05, |
| "loss": 0.9093, |
| "num_input_tokens_seen": 327200, |
| "step": 525 |
| }, |
| { |
| "epoch": 9.298245614035087, |
| "grad_norm": 8.5625, |
| "learning_rate": 3.238977615132697e-05, |
| "loss": 0.9013, |
| "num_input_tokens_seen": 330208, |
| "step": 530 |
| }, |
| { |
| "epoch": 9.385964912280702, |
| "grad_norm": 7.40625, |
| "learning_rate": 3.202327999433924e-05, |
| "loss": 0.9143, |
| "num_input_tokens_seen": 332864, |
| "step": 535 |
| }, |
| { |
| "epoch": 9.473684210526315, |
| "grad_norm": 7.5625, |
| "learning_rate": 3.165513766034167e-05, |
| "loss": 1.0975, |
| "num_input_tokens_seen": 336224, |
| "step": 540 |
| }, |
| { |
| "epoch": 9.56140350877193, |
| "grad_norm": 8.5625, |
| "learning_rate": 3.128543543771336e-05, |
| "loss": 0.892, |
| "num_input_tokens_seen": 339392, |
| "step": 545 |
| }, |
| { |
| "epoch": 9.649122807017545, |
| "grad_norm": 7.46875, |
| "learning_rate": 3.091425998045356e-05, |
| "loss": 0.9657, |
| "num_input_tokens_seen": 342624, |
| "step": 550 |
| }, |
| { |
| "epoch": 9.736842105263158, |
| "grad_norm": 9.8125, |
| "learning_rate": 3.0541698287870965e-05, |
| "loss": 0.958, |
| "num_input_tokens_seen": 345472, |
| "step": 555 |
| }, |
| { |
| "epoch": 9.824561403508772, |
| "grad_norm": 8.0, |
| "learning_rate": 3.01678376841921e-05, |
| "loss": 1.1604, |
| "num_input_tokens_seen": 348160, |
| "step": 560 |
| }, |
| { |
| "epoch": 9.912280701754385, |
| "grad_norm": 7.0625, |
| "learning_rate": 2.9792765798093465e-05, |
| "loss": 1.0537, |
| "num_input_tokens_seen": 351456, |
| "step": 565 |
| }, |
| { |
| "epoch": 10.0, |
| "grad_norm": 7.375, |
| "learning_rate": 2.94165705421624e-05, |
| "loss": 0.9225, |
| "num_input_tokens_seen": 354472, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.0, |
| "eval_loss": 1.0201079845428467, |
| "eval_runtime": 0.6371, |
| "eval_samples_per_second": 39.242, |
| "eval_steps_per_second": 10.988, |
| "num_input_tokens_seen": 354472, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.087719298245615, |
| "grad_norm": 8.5625, |
| "learning_rate": 2.9039340092291373e-05, |
| "loss": 0.8843, |
| "num_input_tokens_seen": 358024, |
| "step": 575 |
| }, |
| { |
| "epoch": 10.175438596491228, |
| "grad_norm": 8.875, |
| "learning_rate": 2.8661162867010543e-05, |
| "loss": 0.9294, |
| "num_input_tokens_seen": 361192, |
| "step": 580 |
| }, |
| { |
| "epoch": 10.263157894736842, |
| "grad_norm": 7.71875, |
| "learning_rate": 2.8282127506763456e-05, |
| "loss": 0.8901, |
| "num_input_tokens_seen": 363816, |
| "step": 585 |
| }, |
| { |
| "epoch": 10.350877192982455, |
| "grad_norm": 6.375, |
| "learning_rate": 2.7902322853130757e-05, |
| "loss": 1.0657, |
| "num_input_tokens_seen": 366440, |
| "step": 590 |
| }, |
| { |
| "epoch": 10.43859649122807, |
| "grad_norm": 8.5625, |
| "learning_rate": 2.752183792800671e-05, |
| "loss": 0.7652, |
| "num_input_tokens_seen": 369672, |
| "step": 595 |
| }, |
| { |
| "epoch": 10.526315789473685, |
| "grad_norm": 8.875, |
| "learning_rate": 2.7140761912733474e-05, |
| "loss": 1.1199, |
| "num_input_tokens_seen": 372808, |
| "step": 600 |
| }, |
| { |
| "epoch": 10.614035087719298, |
| "grad_norm": 8.4375, |
| "learning_rate": 2.6759184127198046e-05, |
| "loss": 1.1935, |
| "num_input_tokens_seen": 375560, |
| "step": 605 |
| }, |
| { |
| "epoch": 10.701754385964913, |
| "grad_norm": 8.3125, |
| "learning_rate": 2.6377194008896637e-05, |
| "loss": 0.8545, |
| "num_input_tokens_seen": 378792, |
| "step": 610 |
| }, |
| { |
| "epoch": 10.789473684210526, |
| "grad_norm": 8.0625, |
| "learning_rate": 2.5994881091971605e-05, |
| "loss": 0.8884, |
| "num_input_tokens_seen": 382312, |
| "step": 615 |
| }, |
| { |
| "epoch": 10.87719298245614, |
| "grad_norm": 7.40625, |
| "learning_rate": 2.5612334986225623e-05, |
| "loss": 1.0492, |
| "num_input_tokens_seen": 386088, |
| "step": 620 |
| }, |
| { |
| "epoch": 10.964912280701755, |
| "grad_norm": 6.59375, |
| "learning_rate": 2.5229645356118163e-05, |
| "loss": 0.9044, |
| "num_input_tokens_seen": 388840, |
| "step": 625 |
| }, |
| { |
| "epoch": 11.0, |
| "eval_loss": 1.0167433023452759, |
| "eval_runtime": 0.6388, |
| "eval_samples_per_second": 39.136, |
| "eval_steps_per_second": 10.958, |
| "num_input_tokens_seen": 389408, |
| "step": 627 |
| }, |
| { |
| "epoch": 11.052631578947368, |
| "grad_norm": 9.9375, |
| "learning_rate": 2.4846901899749185e-05, |
| "loss": 0.9719, |
| "num_input_tokens_seen": 391200, |
| "step": 630 |
| }, |
| { |
| "epoch": 11.140350877192983, |
| "grad_norm": 8.1875, |
| "learning_rate": 2.4464194327834926e-05, |
| "loss": 1.1275, |
| "num_input_tokens_seen": 394880, |
| "step": 635 |
| }, |
| { |
| "epoch": 11.228070175438596, |
| "grad_norm": 5.34375, |
| "learning_rate": 2.4081612342680694e-05, |
| "loss": 0.9087, |
| "num_input_tokens_seen": 398336, |
| "step": 640 |
| }, |
| { |
| "epoch": 11.31578947368421, |
| "grad_norm": 8.25, |
| "learning_rate": 2.369924561715569e-05, |
| "loss": 0.9089, |
| "num_input_tokens_seen": 401632, |
| "step": 645 |
| }, |
| { |
| "epoch": 11.403508771929825, |
| "grad_norm": 8.4375, |
| "learning_rate": 2.3317183773674718e-05, |
| "loss": 0.8306, |
| "num_input_tokens_seen": 405120, |
| "step": 650 |
| }, |
| { |
| "epoch": 11.491228070175438, |
| "grad_norm": 7.90625, |
| "learning_rate": 2.2935516363191693e-05, |
| "loss": 1.0235, |
| "num_input_tokens_seen": 408192, |
| "step": 655 |
| }, |
| { |
| "epoch": 11.578947368421053, |
| "grad_norm": 7.15625, |
| "learning_rate": 2.2554332844209904e-05, |
| "loss": 1.0326, |
| "num_input_tokens_seen": 411360, |
| "step": 660 |
| }, |
| { |
| "epoch": 11.666666666666666, |
| "grad_norm": 7.9375, |
| "learning_rate": 2.2173722561813987e-05, |
| "loss": 0.8318, |
| "num_input_tokens_seen": 414144, |
| "step": 665 |
| }, |
| { |
| "epoch": 11.75438596491228, |
| "grad_norm": 7.15625, |
| "learning_rate": 2.179377472672842e-05, |
| "loss": 1.0715, |
| "num_input_tokens_seen": 417760, |
| "step": 670 |
| }, |
| { |
| "epoch": 11.842105263157894, |
| "grad_norm": 8.3125, |
| "learning_rate": 2.1414578394407597e-05, |
| "loss": 1.0454, |
| "num_input_tokens_seen": 420640, |
| "step": 675 |
| }, |
| { |
| "epoch": 11.929824561403509, |
| "grad_norm": 8.6875, |
| "learning_rate": 2.1036222444162147e-05, |
| "loss": 0.9074, |
| "num_input_tokens_seen": 423360, |
| "step": 680 |
| }, |
| { |
| "epoch": 12.0, |
| "eval_loss": 1.0150678157806396, |
| "eval_runtime": 0.6368, |
| "eval_samples_per_second": 39.261, |
| "eval_steps_per_second": 10.993, |
| "num_input_tokens_seen": 425328, |
| "step": 684 |
| }, |
| { |
| "epoch": 12.017543859649123, |
| "grad_norm": 7.4375, |
| "learning_rate": 2.0658795558326743e-05, |
| "loss": 0.94, |
| "num_input_tokens_seen": 426288, |
| "step": 685 |
| }, |
| { |
| "epoch": 12.105263157894736, |
| "grad_norm": 8.625, |
| "learning_rate": 2.0282386201473894e-05, |
| "loss": 0.9892, |
| "num_input_tokens_seen": 429136, |
| "step": 690 |
| }, |
| { |
| "epoch": 12.192982456140351, |
| "grad_norm": 9.125, |
| "learning_rate": 1.99070825996789e-05, |
| "loss": 0.9312, |
| "num_input_tokens_seen": 432272, |
| "step": 695 |
| }, |
| { |
| "epoch": 12.280701754385966, |
| "grad_norm": 8.0625, |
| "learning_rate": 1.9532972719840607e-05, |
| "loss": 0.927, |
| "num_input_tokens_seen": 435760, |
| "step": 700 |
| }, |
| { |
| "epoch": 12.368421052631579, |
| "grad_norm": 7.75, |
| "learning_rate": 1.9160144249063035e-05, |
| "loss": 1.0059, |
| "num_input_tokens_seen": 439312, |
| "step": 705 |
| }, |
| { |
| "epoch": 12.456140350877194, |
| "grad_norm": 8.9375, |
| "learning_rate": 1.8788684574102467e-05, |
| "loss": 0.932, |
| "num_input_tokens_seen": 442640, |
| "step": 710 |
| }, |
| { |
| "epoch": 12.543859649122806, |
| "grad_norm": 7.59375, |
| "learning_rate": 1.8418680760885027e-05, |
| "loss": 1.0063, |
| "num_input_tokens_seen": 445648, |
| "step": 715 |
| }, |
| { |
| "epoch": 12.631578947368421, |
| "grad_norm": 8.0625, |
| "learning_rate": 1.805021953409934e-05, |
| "loss": 1.1111, |
| "num_input_tokens_seen": 448208, |
| "step": 720 |
| }, |
| { |
| "epoch": 12.719298245614034, |
| "grad_norm": 9.3125, |
| "learning_rate": 1.7683387256869353e-05, |
| "loss": 0.9897, |
| "num_input_tokens_seen": 451600, |
| "step": 725 |
| }, |
| { |
| "epoch": 12.807017543859649, |
| "grad_norm": 6.96875, |
| "learning_rate": 1.7318269910511736e-05, |
| "loss": 0.8884, |
| "num_input_tokens_seen": 454992, |
| "step": 730 |
| }, |
| { |
| "epoch": 12.894736842105264, |
| "grad_norm": 6.03125, |
| "learning_rate": 1.6954953074382863e-05, |
| "loss": 0.887, |
| "num_input_tokens_seen": 458032, |
| "step": 735 |
| }, |
| { |
| "epoch": 12.982456140350877, |
| "grad_norm": 5.46875, |
| "learning_rate": 1.659352190581993e-05, |
| "loss": 0.8735, |
| "num_input_tokens_seen": 461104, |
| "step": 740 |
| }, |
| { |
| "epoch": 13.0, |
| "eval_loss": 1.016857385635376, |
| "eval_runtime": 0.6455, |
| "eval_samples_per_second": 38.727, |
| "eval_steps_per_second": 10.844, |
| "num_input_tokens_seen": 461216, |
| "step": 741 |
| }, |
| { |
| "epoch": 13.070175438596491, |
| "grad_norm": 8.8125, |
| "learning_rate": 1.6234061120181142e-05, |
| "loss": 1.0766, |
| "num_input_tokens_seen": 463904, |
| "step": 745 |
| }, |
| { |
| "epoch": 13.157894736842104, |
| "grad_norm": 7.0625, |
| "learning_rate": 1.5876654970989308e-05, |
| "loss": 0.7744, |
| "num_input_tokens_seen": 467328, |
| "step": 750 |
| }, |
| { |
| "epoch": 13.24561403508772, |
| "grad_norm": 8.125, |
| "learning_rate": 1.552138723018382e-05, |
| "loss": 0.8808, |
| "num_input_tokens_seen": 469792, |
| "step": 755 |
| }, |
| { |
| "epoch": 13.333333333333334, |
| "grad_norm": 8.4375, |
| "learning_rate": 1.5168341168485423e-05, |
| "loss": 0.8573, |
| "num_input_tokens_seen": 472896, |
| "step": 760 |
| }, |
| { |
| "epoch": 13.421052631578947, |
| "grad_norm": 6.59375, |
| "learning_rate": 1.4817599535878565e-05, |
| "loss": 1.0543, |
| "num_input_tokens_seen": 475904, |
| "step": 765 |
| }, |
| { |
| "epoch": 13.508771929824562, |
| "grad_norm": 8.3125, |
| "learning_rate": 1.4469244542215682e-05, |
| "loss": 1.2265, |
| "num_input_tokens_seen": 478848, |
| "step": 770 |
| }, |
| { |
| "epoch": 13.596491228070175, |
| "grad_norm": 6.34375, |
| "learning_rate": 1.4123357837948175e-05, |
| "loss": 0.8088, |
| "num_input_tokens_seen": 482080, |
| "step": 775 |
| }, |
| { |
| "epoch": 13.68421052631579, |
| "grad_norm": 8.0, |
| "learning_rate": 1.3780020494988446e-05, |
| "loss": 0.9942, |
| "num_input_tokens_seen": 485440, |
| "step": 780 |
| }, |
| { |
| "epoch": 13.771929824561404, |
| "grad_norm": 9.5, |
| "learning_rate": 1.3439312987707615e-05, |
| "loss": 0.9109, |
| "num_input_tokens_seen": 488640, |
| "step": 785 |
| }, |
| { |
| "epoch": 13.859649122807017, |
| "grad_norm": 7.03125, |
| "learning_rate": 1.3101315174073162e-05, |
| "loss": 0.8781, |
| "num_input_tokens_seen": 492032, |
| "step": 790 |
| }, |
| { |
| "epoch": 13.947368421052632, |
| "grad_norm": 9.5, |
| "learning_rate": 1.2766106276931223e-05, |
| "loss": 0.9917, |
| "num_input_tokens_seen": 495264, |
| "step": 795 |
| }, |
| { |
| "epoch": 14.0, |
| "eval_loss": 1.0127995014190674, |
| "eval_runtime": 0.6356, |
| "eval_samples_per_second": 39.33, |
| "eval_steps_per_second": 11.012, |
| "num_input_tokens_seen": 496704, |
| "step": 798 |
| }, |
| { |
| "epoch": 14.035087719298245, |
| "grad_norm": 7.46875, |
| "learning_rate": 1.243376486543755e-05, |
| "loss": 0.9226, |
| "num_input_tokens_seen": 498464, |
| "step": 800 |
| }, |
| { |
| "epoch": 14.12280701754386, |
| "grad_norm": 5.9375, |
| "learning_rate": 1.2104368836641908e-05, |
| "loss": 0.8976, |
| "num_input_tokens_seen": 501632, |
| "step": 805 |
| }, |
| { |
| "epoch": 14.210526315789474, |
| "grad_norm": 9.0, |
| "learning_rate": 1.1777995397229771e-05, |
| "loss": 1.059, |
| "num_input_tokens_seen": 504544, |
| "step": 810 |
| }, |
| { |
| "epoch": 14.298245614035087, |
| "grad_norm": 8.125, |
| "learning_rate": 1.1454721045426073e-05, |
| "loss": 0.9036, |
| "num_input_tokens_seen": 507456, |
| "step": 815 |
| }, |
| { |
| "epoch": 14.385964912280702, |
| "grad_norm": 7.25, |
| "learning_rate": 1.113462155306478e-05, |
| "loss": 0.8365, |
| "num_input_tokens_seen": 511136, |
| "step": 820 |
| }, |
| { |
| "epoch": 14.473684210526315, |
| "grad_norm": 8.25, |
| "learning_rate": 1.0817771947828934e-05, |
| "loss": 0.9005, |
| "num_input_tokens_seen": 514368, |
| "step": 825 |
| }, |
| { |
| "epoch": 14.56140350877193, |
| "grad_norm": 7.3125, |
| "learning_rate": 1.0504246495664932e-05, |
| "loss": 1.0711, |
| "num_input_tokens_seen": 517248, |
| "step": 830 |
| }, |
| { |
| "epoch": 14.649122807017545, |
| "grad_norm": 7.875, |
| "learning_rate": 1.0194118683375503e-05, |
| "loss": 1.2334, |
| "num_input_tokens_seen": 520160, |
| "step": 835 |
| }, |
| { |
| "epoch": 14.736842105263158, |
| "grad_norm": 7.4375, |
| "learning_rate": 9.887461201395176e-06, |
| "loss": 0.9207, |
| "num_input_tokens_seen": 523424, |
| "step": 840 |
| }, |
| { |
| "epoch": 14.824561403508772, |
| "grad_norm": 9.0625, |
| "learning_rate": 9.584345926752524e-06, |
| "loss": 1.1004, |
| "num_input_tokens_seen": 526240, |
| "step": 845 |
| }, |
| { |
| "epoch": 14.912280701754385, |
| "grad_norm": 8.25, |
| "learning_rate": 9.284843906222948e-06, |
| "loss": 0.8829, |
| "num_input_tokens_seen": 529440, |
| "step": 850 |
| }, |
| { |
| "epoch": 15.0, |
| "grad_norm": 11.3125, |
| "learning_rate": 8.98902533967618e-06, |
| "loss": 0.8208, |
| "num_input_tokens_seen": 532504, |
| "step": 855 |
| }, |
| { |
| "epoch": 15.0, |
| "eval_loss": 1.0203993320465088, |
| "eval_runtime": 0.6367, |
| "eval_samples_per_second": 39.267, |
| "eval_steps_per_second": 10.995, |
| "num_input_tokens_seen": 532504, |
| "step": 855 |
| }, |
| { |
| "epoch": 15.087719298245615, |
| "grad_norm": 9.9375, |
| "learning_rate": 8.696959563622174e-06, |
| "loss": 0.956, |
| "num_input_tokens_seen": 535544, |
| "step": 860 |
| }, |
| { |
| "epoch": 15.175438596491228, |
| "grad_norm": 9.125, |
| "learning_rate": 8.40871503495947e-06, |
| "loss": 0.9412, |
| "num_input_tokens_seen": 538744, |
| "step": 865 |
| }, |
| { |
| "epoch": 15.263157894736842, |
| "grad_norm": 7.5, |
| "learning_rate": 8.124359314929622e-06, |
| "loss": 0.9152, |
| "num_input_tokens_seen": 542040, |
| "step": 870 |
| }, |
| { |
| "epoch": 15.350877192982455, |
| "grad_norm": 8.25, |
| "learning_rate": 7.843959053281663e-06, |
| "loss": 1.0541, |
| "num_input_tokens_seen": 544888, |
| "step": 875 |
| }, |
| { |
| "epoch": 15.43859649122807, |
| "grad_norm": 8.125, |
| "learning_rate": 7.5675799726501155e-06, |
| "loss": 1.0168, |
| "num_input_tokens_seen": 547640, |
| "step": 880 |
| }, |
| { |
| "epoch": 15.526315789473685, |
| "grad_norm": 5.34375, |
| "learning_rate": 7.295286853150391e-06, |
| "loss": 0.9703, |
| "num_input_tokens_seen": 551224, |
| "step": 885 |
| }, |
| { |
| "epoch": 15.614035087719298, |
| "grad_norm": 9.375, |
| "learning_rate": 7.027143517195023e-06, |
| "loss": 0.9146, |
| "num_input_tokens_seen": 554264, |
| "step": 890 |
| }, |
| { |
| "epoch": 15.701754385964913, |
| "grad_norm": 6.25, |
| "learning_rate": 6.763212814534484e-06, |
| "loss": 0.9484, |
| "num_input_tokens_seen": 557880, |
| "step": 895 |
| }, |
| { |
| "epoch": 15.789473684210526, |
| "grad_norm": 7.34375, |
| "learning_rate": 6.503556607525838e-06, |
| "loss": 0.868, |
| "num_input_tokens_seen": 560376, |
| "step": 900 |
| }, |
| { |
| "epoch": 15.87719298245614, |
| "grad_norm": 9.75, |
| "learning_rate": 6.248235756632984e-06, |
| "loss": 1.0191, |
| "num_input_tokens_seen": 563864, |
| "step": 905 |
| }, |
| { |
| "epoch": 15.964912280701755, |
| "grad_norm": 8.9375, |
| "learning_rate": 5.997310106161589e-06, |
| "loss": 0.8281, |
| "num_input_tokens_seen": 567352, |
| "step": 910 |
| }, |
| { |
| "epoch": 16.0, |
| "eval_loss": 1.0124242305755615, |
| "eval_runtime": 0.6381, |
| "eval_samples_per_second": 39.182, |
| "eval_steps_per_second": 10.971, |
| "num_input_tokens_seen": 567952, |
| "step": 912 |
| }, |
| { |
| "epoch": 16.05263157894737, |
| "grad_norm": 11.25, |
| "learning_rate": 5.7508384702323226e-06, |
| "loss": 1.3169, |
| "num_input_tokens_seen": 570448, |
| "step": 915 |
| }, |
| { |
| "epoch": 16.140350877192983, |
| "grad_norm": 7.53125, |
| "learning_rate": 5.508878618995439e-06, |
| "loss": 0.8373, |
| "num_input_tokens_seen": 573808, |
| "step": 920 |
| }, |
| { |
| "epoch": 16.228070175438596, |
| "grad_norm": 6.9375, |
| "learning_rate": 5.271487265090163e-06, |
| "loss": 0.8243, |
| "num_input_tokens_seen": 577200, |
| "step": 925 |
| }, |
| { |
| "epoch": 16.31578947368421, |
| "grad_norm": 6.71875, |
| "learning_rate": 5.038720050351842e-06, |
| "loss": 0.849, |
| "num_input_tokens_seen": 580240, |
| "step": 930 |
| }, |
| { |
| "epoch": 16.403508771929825, |
| "grad_norm": 6.59375, |
| "learning_rate": 4.810631532770182e-06, |
| "loss": 0.9882, |
| "num_input_tokens_seen": 583120, |
| "step": 935 |
| }, |
| { |
| "epoch": 16.49122807017544, |
| "grad_norm": 9.0, |
| "learning_rate": 4.587275173701428e-06, |
| "loss": 0.8474, |
| "num_input_tokens_seen": 586032, |
| "step": 940 |
| }, |
| { |
| "epoch": 16.57894736842105, |
| "grad_norm": 5.40625, |
| "learning_rate": 4.368703325337667e-06, |
| "loss": 0.8131, |
| "num_input_tokens_seen": 589392, |
| "step": 945 |
| }, |
| { |
| "epoch": 16.666666666666668, |
| "grad_norm": 9.1875, |
| "learning_rate": 4.154967218436037e-06, |
| "loss": 1.1143, |
| "num_input_tokens_seen": 592624, |
| "step": 950 |
| }, |
| { |
| "epoch": 16.75438596491228, |
| "grad_norm": 7.375, |
| "learning_rate": 3.94611695031086e-06, |
| "loss": 1.1119, |
| "num_input_tokens_seen": 595440, |
| "step": 955 |
| }, |
| { |
| "epoch": 16.842105263157894, |
| "grad_norm": 10.5, |
| "learning_rate": 3.74220147309135e-06, |
| "loss": 1.0766, |
| "num_input_tokens_seen": 598640, |
| "step": 960 |
| }, |
| { |
| "epoch": 16.92982456140351, |
| "grad_norm": 9.8125, |
| "learning_rate": 3.543268582247844e-06, |
| "loss": 1.0021, |
| "num_input_tokens_seen": 601584, |
| "step": 965 |
| }, |
| { |
| "epoch": 17.0, |
| "eval_loss": 1.0094741582870483, |
| "eval_runtime": 0.6385, |
| "eval_samples_per_second": 39.156, |
| "eval_steps_per_second": 10.964, |
| "num_input_tokens_seen": 603760, |
| "step": 969 |
| }, |
| { |
| "epoch": 17.017543859649123, |
| "grad_norm": 6.65625, |
| "learning_rate": 3.3493649053890326e-06, |
| "loss": 0.9677, |
| "num_input_tokens_seen": 604464, |
| "step": 970 |
| }, |
| { |
| "epoch": 17.105263157894736, |
| "grad_norm": 7.28125, |
| "learning_rate": 3.1605358913330385e-06, |
| "loss": 0.8369, |
| "num_input_tokens_seen": 607600, |
| "step": 975 |
| }, |
| { |
| "epoch": 17.19298245614035, |
| "grad_norm": 8.375, |
| "learning_rate": 2.9768257994546662e-06, |
| "loss": 0.7602, |
| "num_input_tokens_seen": 611152, |
| "step": 980 |
| }, |
| { |
| "epoch": 17.280701754385966, |
| "grad_norm": 7.84375, |
| "learning_rate": 2.7982776893115627e-06, |
| "loss": 0.8405, |
| "num_input_tokens_seen": 614768, |
| "step": 985 |
| }, |
| { |
| "epoch": 17.36842105263158, |
| "grad_norm": 6.6875, |
| "learning_rate": 2.624933410551508e-06, |
| "loss": 0.9494, |
| "num_input_tokens_seen": 618384, |
| "step": 990 |
| }, |
| { |
| "epoch": 17.45614035087719, |
| "grad_norm": 6.375, |
| "learning_rate": 2.456833593103361e-06, |
| "loss": 1.0352, |
| "num_input_tokens_seen": 621424, |
| "step": 995 |
| }, |
| { |
| "epoch": 17.54385964912281, |
| "grad_norm": 9.6875, |
| "learning_rate": 2.2940176376538445e-06, |
| "loss": 1.0013, |
| "num_input_tokens_seen": 624464, |
| "step": 1000 |
| }, |
| { |
| "epoch": 17.63157894736842, |
| "grad_norm": 7.9375, |
| "learning_rate": 2.136523706412477e-06, |
| "loss": 0.9752, |
| "num_input_tokens_seen": 627824, |
| "step": 1005 |
| }, |
| { |
| "epoch": 17.719298245614034, |
| "grad_norm": 12.0, |
| "learning_rate": 1.984388714166799e-06, |
| "loss": 1.0049, |
| "num_input_tokens_seen": 631056, |
| "step": 1010 |
| }, |
| { |
| "epoch": 17.80701754385965, |
| "grad_norm": 6.96875, |
| "learning_rate": 1.837648319629956e-06, |
| "loss": 0.9531, |
| "num_input_tokens_seen": 634128, |
| "step": 1015 |
| }, |
| { |
| "epoch": 17.894736842105264, |
| "grad_norm": 8.6875, |
| "learning_rate": 1.6963369170826943e-06, |
| "loss": 1.1898, |
| "num_input_tokens_seen": 636656, |
| "step": 1020 |
| }, |
| { |
| "epoch": 17.982456140350877, |
| "grad_norm": 9.0, |
| "learning_rate": 1.5604876283117326e-06, |
| "loss": 1.0184, |
| "num_input_tokens_seen": 639696, |
| "step": 1025 |
| }, |
| { |
| "epoch": 18.0, |
| "eval_loss": 1.0096304416656494, |
| "eval_runtime": 0.6358, |
| "eval_samples_per_second": 39.319, |
| "eval_steps_per_second": 11.009, |
| "num_input_tokens_seen": 639784, |
| "step": 1026 |
| }, |
| { |
| "epoch": 18.07017543859649, |
| "grad_norm": 7.53125, |
| "learning_rate": 1.4301322948464147e-06, |
| "loss": 0.9577, |
| "num_input_tokens_seen": 642280, |
| "step": 1030 |
| }, |
| { |
| "epoch": 18.157894736842106, |
| "grad_norm": 7.65625, |
| "learning_rate": 1.3053014704953987e-06, |
| "loss": 0.9867, |
| "num_input_tokens_seen": 645128, |
| "step": 1035 |
| }, |
| { |
| "epoch": 18.24561403508772, |
| "grad_norm": 8.25, |
| "learning_rate": 1.1860244141851773e-06, |
| "loss": 1.0248, |
| "num_input_tokens_seen": 647528, |
| "step": 1040 |
| }, |
| { |
| "epoch": 18.333333333333332, |
| "grad_norm": 7.90625, |
| "learning_rate": 1.0723290831021471e-06, |
| "loss": 0.9925, |
| "num_input_tokens_seen": 651560, |
| "step": 1045 |
| }, |
| { |
| "epoch": 18.42105263157895, |
| "grad_norm": 8.6875, |
| "learning_rate": 9.642421261397472e-07, |
| "loss": 1.1599, |
| "num_input_tokens_seen": 654568, |
| "step": 1050 |
| }, |
| { |
| "epoch": 18.50877192982456, |
| "grad_norm": 7.875, |
| "learning_rate": 8.617888776522642e-07, |
| "loss": 0.9582, |
| "num_input_tokens_seen": 657448, |
| "step": 1055 |
| }, |
| { |
| "epoch": 18.596491228070175, |
| "grad_norm": 8.625, |
| "learning_rate": 7.649933515167407e-07, |
| "loss": 0.9608, |
| "num_input_tokens_seen": 660392, |
| "step": 1060 |
| }, |
| { |
| "epoch": 18.68421052631579, |
| "grad_norm": 8.3125, |
| "learning_rate": 6.738782355044049e-07, |
| "loss": 1.1286, |
| "num_input_tokens_seen": 663656, |
| "step": 1065 |
| }, |
| { |
| "epoch": 18.771929824561404, |
| "grad_norm": 6.6875, |
| "learning_rate": 5.88464885962911e-07, |
| "loss": 0.7666, |
| "num_input_tokens_seen": 667208, |
| "step": 1070 |
| }, |
| { |
| "epoch": 18.859649122807017, |
| "grad_norm": 9.8125, |
| "learning_rate": 5.087733228106517e-07, |
| "loss": 0.9099, |
| "num_input_tokens_seen": 670568, |
| "step": 1075 |
| }, |
| { |
| "epoch": 18.94736842105263, |
| "grad_norm": 5.9375, |
| "learning_rate": 4.3482222484432513e-07, |
| "loss": 0.7952, |
| "num_input_tokens_seen": 674024, |
| "step": 1080 |
| }, |
| { |
| "epoch": 19.0, |
| "eval_loss": 1.0225871801376343, |
| "eval_runtime": 0.6407, |
| "eval_samples_per_second": 39.022, |
| "eval_steps_per_second": 10.926, |
| "num_input_tokens_seen": 675800, |
| "step": 1083 |
| }, |
| { |
| "epoch": 19.035087719298247, |
| "grad_norm": 7.78125, |
| "learning_rate": 3.666289253608235e-07, |
| "loss": 0.7541, |
| "num_input_tokens_seen": 676920, |
| "step": 1085 |
| }, |
| { |
| "epoch": 19.12280701754386, |
| "grad_norm": 6.78125, |
| "learning_rate": 3.0420940809451624e-07, |
| "loss": 0.86, |
| "num_input_tokens_seen": 679896, |
| "step": 1090 |
| }, |
| { |
| "epoch": 19.210526315789473, |
| "grad_norm": 8.1875, |
| "learning_rate": 2.47578303470844e-07, |
| "loss": 0.9877, |
| "num_input_tokens_seen": 683288, |
| "step": 1095 |
| }, |
| { |
| "epoch": 19.29824561403509, |
| "grad_norm": 7.15625, |
| "learning_rate": 1.96748885177106e-07, |
| "loss": 1.0717, |
| "num_input_tokens_seen": 686168, |
| "step": 1100 |
| }, |
| { |
| "epoch": 19.385964912280702, |
| "grad_norm": 8.875, |
| "learning_rate": 1.517330670512629e-07, |
| "loss": 0.8787, |
| "num_input_tokens_seen": 689016, |
| "step": 1105 |
| }, |
| { |
| "epoch": 19.473684210526315, |
| "grad_norm": 9.0, |
| "learning_rate": 1.125414002894759e-07, |
| "loss": 1.0008, |
| "num_input_tokens_seen": 692376, |
| "step": 1110 |
| }, |
| { |
| "epoch": 19.56140350877193, |
| "grad_norm": 8.5, |
| "learning_rate": 7.918307097301014e-08, |
| "loss": 0.9755, |
| "num_input_tokens_seen": 695480, |
| "step": 1115 |
| }, |
| { |
| "epoch": 19.649122807017545, |
| "grad_norm": 6.96875, |
| "learning_rate": 5.166589791513465e-08, |
| "loss": 0.978, |
| "num_input_tokens_seen": 698424, |
| "step": 1120 |
| }, |
| { |
| "epoch": 19.736842105263158, |
| "grad_norm": 11.625, |
| "learning_rate": 2.999633082847453e-08, |
| "loss": 0.9043, |
| "num_input_tokens_seen": 701592, |
| "step": 1125 |
| }, |
| { |
| "epoch": 19.82456140350877, |
| "grad_norm": 8.25, |
| "learning_rate": 1.4179448813278484e-08, |
| "loss": 0.9811, |
| "num_input_tokens_seen": 704504, |
| "step": 1130 |
| }, |
| { |
| "epoch": 19.912280701754387, |
| "grad_norm": 7.0, |
| "learning_rate": 4.218959166932268e-09, |
| "loss": 1.009, |
| "num_input_tokens_seen": 707992, |
| "step": 1135 |
| }, |
| { |
| "epoch": 20.0, |
| "grad_norm": 9.0, |
| "learning_rate": 1.1719651499819683e-10, |
| "loss": 0.8799, |
| "num_input_tokens_seen": 711112, |
| "step": 1140 |
| }, |
| { |
| "epoch": 20.0, |
| "eval_loss": 1.006801962852478, |
| "eval_runtime": 0.6395, |
| "eval_samples_per_second": 39.094, |
| "eval_steps_per_second": 10.946, |
| "num_input_tokens_seen": 711112, |
| "step": 1140 |
| }, |
| { |
| "epoch": 20.0, |
| "num_input_tokens_seen": 711112, |
| "step": 1140, |
| "total_flos": 3.2022187918884864e+16, |
| "train_loss": 0.9910672608174775, |
| "train_runtime": 262.2808, |
| "train_samples_per_second": 17.157, |
| "train_steps_per_second": 4.346 |
| } |
| ], |
| "logging_steps": 5, |
| "max_steps": 1140, |
| "num_input_tokens_seen": 711112, |
| "num_train_epochs": 20, |
| "save_steps": 57, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 3.2022187918884864e+16, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |