| { |
| "best_global_step": 171, |
| "best_metric": 0.14093659818172455, |
| "best_model_checkpoint": "saves_multiple/lora/llama-3-8b-instruct/train_cb_123_1760637640/checkpoint-171", |
| "epoch": 20.0, |
| "eval_steps": 57, |
| "global_step": 1140, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.08771929824561403, |
| "grad_norm": 7.363135814666748, |
| "learning_rate": 1.7543859649122807e-06, |
| "loss": 1.1115, |
| "num_input_tokens_seen": 3552, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.17543859649122806, |
| "grad_norm": 6.227541446685791, |
| "learning_rate": 3.9473684210526315e-06, |
| "loss": 1.208, |
| "num_input_tokens_seen": 7264, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.2631578947368421, |
| "grad_norm": 7.40158224105835, |
| "learning_rate": 6.140350877192982e-06, |
| "loss": 1.0576, |
| "num_input_tokens_seen": 10528, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.3508771929824561, |
| "grad_norm": 7.5627546310424805, |
| "learning_rate": 8.333333333333334e-06, |
| "loss": 0.9079, |
| "num_input_tokens_seen": 14784, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.43859649122807015, |
| "grad_norm": 9.372406005859375, |
| "learning_rate": 1.0526315789473684e-05, |
| "loss": 0.8516, |
| "num_input_tokens_seen": 18112, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.5263157894736842, |
| "grad_norm": 3.3539698123931885, |
| "learning_rate": 1.2719298245614037e-05, |
| "loss": 0.5545, |
| "num_input_tokens_seen": 20736, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.6140350877192983, |
| "grad_norm": 5.2519850730896, |
| "learning_rate": 1.4912280701754386e-05, |
| "loss": 0.1713, |
| "num_input_tokens_seen": 24896, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.7017543859649122, |
| "grad_norm": 3.079636335372925, |
| "learning_rate": 1.7105263157894737e-05, |
| "loss": 0.0993, |
| "num_input_tokens_seen": 28160, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.7894736842105263, |
| "grad_norm": 2.2831075191497803, |
| "learning_rate": 1.929824561403509e-05, |
| "loss": 0.3283, |
| "num_input_tokens_seen": 31040, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.8771929824561403, |
| "grad_norm": 1.2093722820281982, |
| "learning_rate": 2.149122807017544e-05, |
| "loss": 0.0748, |
| "num_input_tokens_seen": 33760, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.9649122807017544, |
| "grad_norm": 1.744345784187317, |
| "learning_rate": 2.368421052631579e-05, |
| "loss": 0.2136, |
| "num_input_tokens_seen": 36416, |
| "step": 55 |
| }, |
| { |
| "epoch": 1.0, |
| "eval_loss": 0.20269431173801422, |
| "eval_runtime": 0.5828, |
| "eval_samples_per_second": 42.9, |
| "eval_steps_per_second": 12.012, |
| "num_input_tokens_seen": 37160, |
| "step": 57 |
| }, |
| { |
| "epoch": 1.0526315789473684, |
| "grad_norm": 1.3665523529052734, |
| "learning_rate": 2.5877192982456143e-05, |
| "loss": 0.0067, |
| "num_input_tokens_seen": 39176, |
| "step": 60 |
| }, |
| { |
| "epoch": 1.1403508771929824, |
| "grad_norm": 9.074639320373535, |
| "learning_rate": 2.8070175438596492e-05, |
| "loss": 0.2564, |
| "num_input_tokens_seen": 42632, |
| "step": 65 |
| }, |
| { |
| "epoch": 1.2280701754385965, |
| "grad_norm": 4.407818794250488, |
| "learning_rate": 3.0263157894736844e-05, |
| "loss": 0.5164, |
| "num_input_tokens_seen": 45704, |
| "step": 70 |
| }, |
| { |
| "epoch": 1.3157894736842106, |
| "grad_norm": 1.4475651979446411, |
| "learning_rate": 3.24561403508772e-05, |
| "loss": 0.1478, |
| "num_input_tokens_seen": 49448, |
| "step": 75 |
| }, |
| { |
| "epoch": 1.4035087719298245, |
| "grad_norm": 11.214638710021973, |
| "learning_rate": 3.4649122807017546e-05, |
| "loss": 0.1833, |
| "num_input_tokens_seen": 52456, |
| "step": 80 |
| }, |
| { |
| "epoch": 1.4912280701754386, |
| "grad_norm": 2.834925413131714, |
| "learning_rate": 3.6842105263157895e-05, |
| "loss": 0.1225, |
| "num_input_tokens_seen": 56488, |
| "step": 85 |
| }, |
| { |
| "epoch": 1.5789473684210527, |
| "grad_norm": 4.807704925537109, |
| "learning_rate": 3.9035087719298244e-05, |
| "loss": 0.3505, |
| "num_input_tokens_seen": 59208, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.6666666666666665, |
| "grad_norm": 0.503318190574646, |
| "learning_rate": 4.12280701754386e-05, |
| "loss": 0.0171, |
| "num_input_tokens_seen": 62696, |
| "step": 95 |
| }, |
| { |
| "epoch": 1.7543859649122808, |
| "grad_norm": 0.03547332435846329, |
| "learning_rate": 4.342105263157895e-05, |
| "loss": 0.1355, |
| "num_input_tokens_seen": 66024, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.8421052631578947, |
| "grad_norm": 4.252735614776611, |
| "learning_rate": 4.56140350877193e-05, |
| "loss": 0.279, |
| "num_input_tokens_seen": 69000, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.9298245614035088, |
| "grad_norm": 0.04310083016753197, |
| "learning_rate": 4.780701754385965e-05, |
| "loss": 0.0799, |
| "num_input_tokens_seen": 72072, |
| "step": 110 |
| }, |
| { |
| "epoch": 2.0, |
| "eval_loss": 0.1580265313386917, |
| "eval_runtime": 0.5861, |
| "eval_samples_per_second": 42.653, |
| "eval_steps_per_second": 11.943, |
| "num_input_tokens_seen": 73720, |
| "step": 114 |
| }, |
| { |
| "epoch": 2.017543859649123, |
| "grad_norm": 0.37703779339790344, |
| "learning_rate": 5e-05, |
| "loss": 0.0304, |
| "num_input_tokens_seen": 74264, |
| "step": 115 |
| }, |
| { |
| "epoch": 2.1052631578947367, |
| "grad_norm": 0.08708511292934418, |
| "learning_rate": 4.999707014206475e-05, |
| "loss": 0.0019, |
| "num_input_tokens_seen": 77528, |
| "step": 120 |
| }, |
| { |
| "epoch": 2.192982456140351, |
| "grad_norm": 0.0757942795753479, |
| "learning_rate": 4.9988281254984414e-05, |
| "loss": 0.0925, |
| "num_input_tokens_seen": 80600, |
| "step": 125 |
| }, |
| { |
| "epoch": 2.280701754385965, |
| "grad_norm": 0.2999916076660156, |
| "learning_rate": 4.997363539877422e-05, |
| "loss": 0.038, |
| "num_input_tokens_seen": 83800, |
| "step": 130 |
| }, |
| { |
| "epoch": 2.3684210526315788, |
| "grad_norm": 1.9646483659744263, |
| "learning_rate": 4.9953136006256415e-05, |
| "loss": 0.1326, |
| "num_input_tokens_seen": 86552, |
| "step": 135 |
| }, |
| { |
| "epoch": 2.456140350877193, |
| "grad_norm": 2.8404128551483154, |
| "learning_rate": 4.9926787882255636e-05, |
| "loss": 0.1324, |
| "num_input_tokens_seen": 90008, |
| "step": 140 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "grad_norm": 0.5288718938827515, |
| "learning_rate": 4.9894597202472696e-05, |
| "loss": 0.2619, |
| "num_input_tokens_seen": 93496, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.6315789473684212, |
| "grad_norm": 1.504030704498291, |
| "learning_rate": 4.985657151203706e-05, |
| "loss": 0.0177, |
| "num_input_tokens_seen": 96888, |
| "step": 150 |
| }, |
| { |
| "epoch": 2.719298245614035, |
| "grad_norm": 1.668042778968811, |
| "learning_rate": 4.9812719723738435e-05, |
| "loss": 0.0187, |
| "num_input_tokens_seen": 100728, |
| "step": 155 |
| }, |
| { |
| "epoch": 2.807017543859649, |
| "grad_norm": 0.023328855633735657, |
| "learning_rate": 4.976305211593758e-05, |
| "loss": 0.1381, |
| "num_input_tokens_seen": 103960, |
| "step": 160 |
| }, |
| { |
| "epoch": 2.8947368421052633, |
| "grad_norm": 20.24945068359375, |
| "learning_rate": 4.970758033015731e-05, |
| "loss": 0.0955, |
| "num_input_tokens_seen": 106648, |
| "step": 165 |
| }, |
| { |
| "epoch": 2.982456140350877, |
| "grad_norm": 0.04455365613102913, |
| "learning_rate": 4.9646317368353743e-05, |
| "loss": 0.1237, |
| "num_input_tokens_seen": 110136, |
| "step": 170 |
| }, |
| { |
| "epoch": 3.0, |
| "eval_loss": 0.14093659818172455, |
| "eval_runtime": 0.5912, |
| "eval_samples_per_second": 42.289, |
| "eval_steps_per_second": 11.841, |
| "num_input_tokens_seen": 110296, |
| "step": 171 |
| }, |
| { |
| "epoch": 3.0701754385964914, |
| "grad_norm": 0.05890918895602226, |
| "learning_rate": 4.957927758986888e-05, |
| "loss": 0.0116, |
| "num_input_tokens_seen": 112984, |
| "step": 175 |
| }, |
| { |
| "epoch": 3.1578947368421053, |
| "grad_norm": 1.7225713729858398, |
| "learning_rate": 4.9506476708064865e-05, |
| "loss": 0.0021, |
| "num_input_tokens_seen": 116120, |
| "step": 180 |
| }, |
| { |
| "epoch": 3.245614035087719, |
| "grad_norm": 0.005805062595754862, |
| "learning_rate": 4.9427931786641e-05, |
| "loss": 0.0023, |
| "num_input_tokens_seen": 119576, |
| "step": 185 |
| }, |
| { |
| "epoch": 3.3333333333333335, |
| "grad_norm": 4.142322540283203, |
| "learning_rate": 4.93436612356342e-05, |
| "loss": 0.0736, |
| "num_input_tokens_seen": 122968, |
| "step": 190 |
| }, |
| { |
| "epoch": 3.4210526315789473, |
| "grad_norm": 0.007462541572749615, |
| "learning_rate": 4.925368480710385e-05, |
| "loss": 0.1628, |
| "num_input_tokens_seen": 126104, |
| "step": 195 |
| }, |
| { |
| "epoch": 3.5087719298245617, |
| "grad_norm": 1.1968806982040405, |
| "learning_rate": 4.915802359050222e-05, |
| "loss": 0.0178, |
| "num_input_tokens_seen": 129368, |
| "step": 200 |
| }, |
| { |
| "epoch": 3.5964912280701755, |
| "grad_norm": 1.4944263696670532, |
| "learning_rate": 4.905670000773126e-05, |
| "loss": 0.0648, |
| "num_input_tokens_seen": 132888, |
| "step": 205 |
| }, |
| { |
| "epoch": 3.6842105263157894, |
| "grad_norm": 0.07721724361181259, |
| "learning_rate": 4.894973780788722e-05, |
| "loss": 0.0495, |
| "num_input_tokens_seen": 135832, |
| "step": 210 |
| }, |
| { |
| "epoch": 3.7719298245614032, |
| "grad_norm": 0.13424545526504517, |
| "learning_rate": 4.88371620616941e-05, |
| "loss": 0.149, |
| "num_input_tokens_seen": 138936, |
| "step": 215 |
| }, |
| { |
| "epoch": 3.8596491228070176, |
| "grad_norm": 0.025514209643006325, |
| "learning_rate": 4.871899915562736e-05, |
| "loss": 0.0036, |
| "num_input_tokens_seen": 142488, |
| "step": 220 |
| }, |
| { |
| "epoch": 3.9473684210526314, |
| "grad_norm": 9.592844009399414, |
| "learning_rate": 4.8595276785729236e-05, |
| "loss": 0.0303, |
| "num_input_tokens_seen": 146808, |
| "step": 225 |
| }, |
| { |
| "epoch": 4.0, |
| "eval_loss": 0.17506125569343567, |
| "eval_runtime": 0.5941, |
| "eval_samples_per_second": 42.081, |
| "eval_steps_per_second": 11.783, |
| "num_input_tokens_seen": 147784, |
| "step": 228 |
| }, |
| { |
| "epoch": 4.035087719298246, |
| "grad_norm": 0.047150205820798874, |
| "learning_rate": 4.846602395111711e-05, |
| "loss": 0.0026, |
| "num_input_tokens_seen": 149448, |
| "step": 230 |
| }, |
| { |
| "epoch": 4.12280701754386, |
| "grad_norm": 0.7605629563331604, |
| "learning_rate": 4.833127094718643e-05, |
| "loss": 0.0026, |
| "num_input_tokens_seen": 152488, |
| "step": 235 |
| }, |
| { |
| "epoch": 4.2105263157894735, |
| "grad_norm": 10.281540870666504, |
| "learning_rate": 4.819104935850983e-05, |
| "loss": 0.0631, |
| "num_input_tokens_seen": 155208, |
| "step": 240 |
| }, |
| { |
| "epoch": 4.298245614035087, |
| "grad_norm": 0.5210893750190735, |
| "learning_rate": 4.804539205143405e-05, |
| "loss": 0.0022, |
| "num_input_tokens_seen": 158088, |
| "step": 245 |
| }, |
| { |
| "epoch": 4.385964912280702, |
| "grad_norm": 0.003426361596211791, |
| "learning_rate": 4.789433316637644e-05, |
| "loss": 0.0107, |
| "num_input_tokens_seen": 161032, |
| "step": 250 |
| }, |
| { |
| "epoch": 4.473684210526316, |
| "grad_norm": 0.03823290765285492, |
| "learning_rate": 4.7737908109822854e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 164744, |
| "step": 255 |
| }, |
| { |
| "epoch": 4.56140350877193, |
| "grad_norm": 0.07661211490631104, |
| "learning_rate": 4.757615354602874e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 167592, |
| "step": 260 |
| }, |
| { |
| "epoch": 4.649122807017544, |
| "grad_norm": 0.00253407284617424, |
| "learning_rate": 4.7409107388425504e-05, |
| "loss": 0.0094, |
| "num_input_tokens_seen": 170696, |
| "step": 265 |
| }, |
| { |
| "epoch": 4.7368421052631575, |
| "grad_norm": 8.058258056640625, |
| "learning_rate": 4.723680879073396e-05, |
| "loss": 0.0047, |
| "num_input_tokens_seen": 174024, |
| "step": 270 |
| }, |
| { |
| "epoch": 4.824561403508772, |
| "grad_norm": 0.0070835198275744915, |
| "learning_rate": 4.70592981377872e-05, |
| "loss": 0.0543, |
| "num_input_tokens_seen": 178152, |
| "step": 275 |
| }, |
| { |
| "epoch": 4.912280701754386, |
| "grad_norm": 0.004841147921979427, |
| "learning_rate": 4.6876617036064844e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 181768, |
| "step": 280 |
| }, |
| { |
| "epoch": 5.0, |
| "grad_norm": 1.8160358667373657, |
| "learning_rate": 4.668880830394093e-05, |
| "loss": 0.0013, |
| "num_input_tokens_seen": 184368, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.0, |
| "eval_loss": 0.21350887417793274, |
| "eval_runtime": 0.5947, |
| "eval_samples_per_second": 42.038, |
| "eval_steps_per_second": 11.771, |
| "num_input_tokens_seen": 184368, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "grad_norm": 0.007710246369242668, |
| "learning_rate": 4.649591596164778e-05, |
| "loss": 0.0154, |
| "num_input_tokens_seen": 187312, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.175438596491228, |
| "grad_norm": 0.007341461721807718, |
| "learning_rate": 4.629798522095818e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 190736, |
| "step": 295 |
| }, |
| { |
| "epoch": 5.2631578947368425, |
| "grad_norm": 0.02518148347735405, |
| "learning_rate": 4.6095062474588225e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 194480, |
| "step": 300 |
| }, |
| { |
| "epoch": 5.350877192982456, |
| "grad_norm": 0.008086412213742733, |
| "learning_rate": 4.588719528532342e-05, |
| "loss": 0.0094, |
| "num_input_tokens_seen": 198576, |
| "step": 305 |
| }, |
| { |
| "epoch": 5.43859649122807, |
| "grad_norm": 0.027759520336985588, |
| "learning_rate": 4.5674432374870455e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 201808, |
| "step": 310 |
| }, |
| { |
| "epoch": 5.526315789473684, |
| "grad_norm": 0.010072549805045128, |
| "learning_rate": 4.545682361243748e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 204912, |
| "step": 315 |
| }, |
| { |
| "epoch": 5.614035087719298, |
| "grad_norm": 0.02954721823334694, |
| "learning_rate": 4.5234420003045236e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 207760, |
| "step": 320 |
| }, |
| { |
| "epoch": 5.701754385964913, |
| "grad_norm": 0.012121260166168213, |
| "learning_rate": 4.5007273675572104e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 210544, |
| "step": 325 |
| }, |
| { |
| "epoch": 5.7894736842105265, |
| "grad_norm": 0.4865941107273102, |
| "learning_rate": 4.4775437870535685e-05, |
| "loss": 0.0006, |
| "num_input_tokens_seen": 214480, |
| "step": 330 |
| }, |
| { |
| "epoch": 5.87719298245614, |
| "grad_norm": 0.689924955368042, |
| "learning_rate": 4.4538966927613836e-05, |
| "loss": 0.0005, |
| "num_input_tokens_seen": 218064, |
| "step": 335 |
| }, |
| { |
| "epoch": 5.964912280701754, |
| "grad_norm": 0.0018874453380703926, |
| "learning_rate": 4.4297916272908024e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 220848, |
| "step": 340 |
| }, |
| { |
| "epoch": 6.0, |
| "eval_loss": 0.2931666076183319, |
| "eval_runtime": 0.5929, |
| "eval_samples_per_second": 42.168, |
| "eval_steps_per_second": 11.807, |
| "num_input_tokens_seen": 221536, |
| "step": 342 |
| }, |
| { |
| "epoch": 6.052631578947368, |
| "grad_norm": 0.007419330533593893, |
| "learning_rate": 4.405234240595214e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 223616, |
| "step": 345 |
| }, |
| { |
| "epoch": 6.140350877192983, |
| "grad_norm": 0.01061911042779684, |
| "learning_rate": 4.3802302886469606e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 227008, |
| "step": 350 |
| }, |
| { |
| "epoch": 6.228070175438597, |
| "grad_norm": 0.1701219081878662, |
| "learning_rate": 4.3547856320882044e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 230752, |
| "step": 355 |
| }, |
| { |
| "epoch": 6.315789473684211, |
| "grad_norm": 0.8761810660362244, |
| "learning_rate": 4.328906234857259e-05, |
| "loss": 0.0005, |
| "num_input_tokens_seen": 234208, |
| "step": 360 |
| }, |
| { |
| "epoch": 6.4035087719298245, |
| "grad_norm": 0.002926096087321639, |
| "learning_rate": 4.302598162790712e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 237664, |
| "step": 365 |
| }, |
| { |
| "epoch": 6.491228070175438, |
| "grad_norm": 0.0033948016352951527, |
| "learning_rate": 4.27586758220166e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 240704, |
| "step": 370 |
| }, |
| { |
| "epoch": 6.578947368421053, |
| "grad_norm": 0.004731409717351198, |
| "learning_rate": 4.2487207584343955e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 244256, |
| "step": 375 |
| }, |
| { |
| "epoch": 6.666666666666667, |
| "grad_norm": 0.0004714055103249848, |
| "learning_rate": 4.2211640543958796e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 246816, |
| "step": 380 |
| }, |
| { |
| "epoch": 6.754385964912281, |
| "grad_norm": 0.018628528341650963, |
| "learning_rate": 4.193203929064353e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 249728, |
| "step": 385 |
| }, |
| { |
| "epoch": 6.842105263157895, |
| "grad_norm": 0.005702705588191748, |
| "learning_rate": 4.164846935975421e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 252480, |
| "step": 390 |
| }, |
| { |
| "epoch": 6.9298245614035086, |
| "grad_norm": 0.022560538724064827, |
| "learning_rate": 4.136099721685983e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 256480, |
| "step": 395 |
| }, |
| { |
| "epoch": 7.0, |
| "eval_loss": 0.2913901209831238, |
| "eval_runtime": 0.5903, |
| "eval_samples_per_second": 42.352, |
| "eval_steps_per_second": 11.859, |
| "num_input_tokens_seen": 258720, |
| "step": 399 |
| }, |
| { |
| "epoch": 7.017543859649122, |
| "grad_norm": 0.0014436967903748155, |
| "learning_rate": 4.1069690242163484e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 259424, |
| "step": 400 |
| }, |
| { |
| "epoch": 7.105263157894737, |
| "grad_norm": 0.0007705138414166868, |
| "learning_rate": 4.0774616714709316e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 262656, |
| "step": 405 |
| }, |
| { |
| "epoch": 7.192982456140351, |
| "grad_norm": 0.0007978085777722299, |
| "learning_rate": 4.047584579637857e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 265408, |
| "step": 410 |
| }, |
| { |
| "epoch": 7.280701754385965, |
| "grad_norm": 0.005320470314472914, |
| "learning_rate": 4.0173447515678916e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 268992, |
| "step": 415 |
| }, |
| { |
| "epoch": 7.368421052631579, |
| "grad_norm": 0.0007099729846231639, |
| "learning_rate": 3.986749275133057e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 272288, |
| "step": 420 |
| }, |
| { |
| "epoch": 7.456140350877193, |
| "grad_norm": 0.0009340548422187567, |
| "learning_rate": 3.955805321565304e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 275104, |
| "step": 425 |
| }, |
| { |
| "epoch": 7.543859649122807, |
| "grad_norm": 0.0007877979660406709, |
| "learning_rate": 3.9245201437756654e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 278272, |
| "step": 430 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "grad_norm": 0.009080762043595314, |
| "learning_rate": 3.892901074654255e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 282048, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.719298245614035, |
| "grad_norm": 0.0011492838384583592, |
| "learning_rate": 3.860955525351516e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 285536, |
| "step": 440 |
| }, |
| { |
| "epoch": 7.807017543859649, |
| "grad_norm": 0.006582766305655241, |
| "learning_rate": 3.82869098354114e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 288416, |
| "step": 445 |
| }, |
| { |
| "epoch": 7.894736842105263, |
| "grad_norm": 0.0016134028555825353, |
| "learning_rate": 3.796115011665034e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 291968, |
| "step": 450 |
| }, |
| { |
| "epoch": 7.982456140350877, |
| "grad_norm": 0.005474638193845749, |
| "learning_rate": 3.763235245160775e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 295296, |
| "step": 455 |
| }, |
| { |
| "epoch": 8.0, |
| "eval_loss": 0.30601078271865845, |
| "eval_runtime": 0.5899, |
| "eval_samples_per_second": 42.384, |
| "eval_steps_per_second": 11.867, |
| "num_input_tokens_seen": 295408, |
| "step": 456 |
| }, |
| { |
| "epoch": 8.070175438596491, |
| "grad_norm": 0.0012144362553954124, |
| "learning_rate": 3.7300593906719464e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 298480, |
| "step": 460 |
| }, |
| { |
| "epoch": 8.157894736842104, |
| "grad_norm": 0.0005654149572364986, |
| "learning_rate": 3.69659522424179e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 301904, |
| "step": 465 |
| }, |
| { |
| "epoch": 8.24561403508772, |
| "grad_norm": 0.0009397775866091251, |
| "learning_rate": 3.662850589490592e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 305488, |
| "step": 470 |
| }, |
| { |
| "epoch": 8.333333333333334, |
| "grad_norm": 0.0011858942452818155, |
| "learning_rate": 3.628833395777224e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 308336, |
| "step": 475 |
| }, |
| { |
| "epoch": 8.421052631578947, |
| "grad_norm": 0.0015620685881003737, |
| "learning_rate": 3.59455161634528e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 311600, |
| "step": 480 |
| }, |
| { |
| "epoch": 8.508771929824562, |
| "grad_norm": 0.0012081796303391457, |
| "learning_rate": 3.560013286454242e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 315216, |
| "step": 485 |
| }, |
| { |
| "epoch": 8.596491228070175, |
| "grad_norm": 0.0007230513729155064, |
| "learning_rate": 3.5252265014961006e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 318640, |
| "step": 490 |
| }, |
| { |
| "epoch": 8.68421052631579, |
| "grad_norm": 0.0016971679870039225, |
| "learning_rate": 3.490199415097892e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 321680, |
| "step": 495 |
| }, |
| { |
| "epoch": 8.771929824561404, |
| "grad_norm": 0.0005313905421644449, |
| "learning_rate": 3.45494023721058e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 324464, |
| "step": 500 |
| }, |
| { |
| "epoch": 8.859649122807017, |
| "grad_norm": 0.0005570178618654609, |
| "learning_rate": 3.4194572321847336e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 327888, |
| "step": 505 |
| }, |
| { |
| "epoch": 8.947368421052632, |
| "grad_norm": 0.0004310946096666157, |
| "learning_rate": 3.383758716833459e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 331312, |
| "step": 510 |
| }, |
| { |
| "epoch": 9.0, |
| "eval_loss": 0.3096185326576233, |
| "eval_runtime": 0.5905, |
| "eval_samples_per_second": 42.339, |
| "eval_steps_per_second": 11.855, |
| "num_input_tokens_seen": 332648, |
| "step": 513 |
| }, |
| { |
| "epoch": 9.035087719298245, |
| "grad_norm": 0.00045281994971446693, |
| "learning_rate": 3.347853058483037e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 334120, |
| "step": 515 |
| }, |
| { |
| "epoch": 9.12280701754386, |
| "grad_norm": 0.0005216025747358799, |
| "learning_rate": 3.311748673011709e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 338280, |
| "step": 520 |
| }, |
| { |
| "epoch": 9.210526315789474, |
| "grad_norm": 0.0013580027734860778, |
| "learning_rate": 3.275454022877097e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 341064, |
| "step": 525 |
| }, |
| { |
| "epoch": 9.298245614035087, |
| "grad_norm": 0.000575086975004524, |
| "learning_rate": 3.238977615132697e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 344552, |
| "step": 530 |
| }, |
| { |
| "epoch": 9.385964912280702, |
| "grad_norm": 0.0004719849384855479, |
| "learning_rate": 3.202327999433924e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 347592, |
| "step": 535 |
| }, |
| { |
| "epoch": 9.473684210526315, |
| "grad_norm": 0.000458006834378466, |
| "learning_rate": 3.165513766034167e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 350440, |
| "step": 540 |
| }, |
| { |
| "epoch": 9.56140350877193, |
| "grad_norm": 0.0014565642923116684, |
| "learning_rate": 3.128543543771336e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 353992, |
| "step": 545 |
| }, |
| { |
| "epoch": 9.649122807017545, |
| "grad_norm": 0.0005885666469112039, |
| "learning_rate": 3.091425998045356e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 357544, |
| "step": 550 |
| }, |
| { |
| "epoch": 9.736842105263158, |
| "grad_norm": 0.0005947434110566974, |
| "learning_rate": 3.0541698287870965e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 360872, |
| "step": 555 |
| }, |
| { |
| "epoch": 9.824561403508772, |
| "grad_norm": 0.0004684940504375845, |
| "learning_rate": 3.01678376841921e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 364200, |
| "step": 560 |
| }, |
| { |
| "epoch": 9.912280701754385, |
| "grad_norm": 0.001642635790631175, |
| "learning_rate": 2.9792765798093465e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 367656, |
| "step": 565 |
| }, |
| { |
| "epoch": 10.0, |
| "grad_norm": 0.000529758573975414, |
| "learning_rate": 2.94165705421624e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 369976, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.0, |
| "eval_loss": 0.3129103481769562, |
| "eval_runtime": 0.5913, |
| "eval_samples_per_second": 42.282, |
| "eval_steps_per_second": 11.839, |
| "num_input_tokens_seen": 369976, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.087719298245615, |
| "grad_norm": 0.0008130972273647785, |
| "learning_rate": 2.9039340092291373e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 373304, |
| "step": 575 |
| }, |
| { |
| "epoch": 10.175438596491228, |
| "grad_norm": 0.0019185326527804136, |
| "learning_rate": 2.8661162867010543e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 376632, |
| "step": 580 |
| }, |
| { |
| "epoch": 10.263157894736842, |
| "grad_norm": 0.0006094375858083367, |
| "learning_rate": 2.8282127506763456e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 379608, |
| "step": 585 |
| }, |
| { |
| "epoch": 10.350877192982455, |
| "grad_norm": 0.00031448237132281065, |
| "learning_rate": 2.7902322853130757e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 382296, |
| "step": 590 |
| }, |
| { |
| "epoch": 10.43859649122807, |
| "grad_norm": 0.00057619484141469, |
| "learning_rate": 2.752183792800671e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 385880, |
| "step": 595 |
| }, |
| { |
| "epoch": 10.526315789473685, |
| "grad_norm": 0.0031581337098032236, |
| "learning_rate": 2.7140761912733474e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 388824, |
| "step": 600 |
| }, |
| { |
| "epoch": 10.614035087719298, |
| "grad_norm": 0.0004049862618558109, |
| "learning_rate": 2.6759184127198046e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 391640, |
| "step": 605 |
| }, |
| { |
| "epoch": 10.701754385964913, |
| "grad_norm": 0.002748518716543913, |
| "learning_rate": 2.6377194008896637e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 394968, |
| "step": 610 |
| }, |
| { |
| "epoch": 10.789473684210526, |
| "grad_norm": 0.0032486706040799618, |
| "learning_rate": 2.5994881091971605e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 398520, |
| "step": 615 |
| }, |
| { |
| "epoch": 10.87719298245614, |
| "grad_norm": 0.0009457306005060673, |
| "learning_rate": 2.5612334986225623e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 402328, |
| "step": 620 |
| }, |
| { |
| "epoch": 10.964912280701755, |
| "grad_norm": 0.0004885084345005453, |
| "learning_rate": 2.5229645356118163e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 405880, |
| "step": 625 |
| }, |
| { |
| "epoch": 11.0, |
| "eval_loss": 0.313282310962677, |
| "eval_runtime": 0.5935, |
| "eval_samples_per_second": 42.124, |
| "eval_steps_per_second": 11.795, |
| "num_input_tokens_seen": 406840, |
| "step": 627 |
| }, |
| { |
| "epoch": 11.052631578947368, |
| "grad_norm": 0.0002464507124386728, |
| "learning_rate": 2.4846901899749185e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 408632, |
| "step": 630 |
| }, |
| { |
| "epoch": 11.140350877192983, |
| "grad_norm": 0.0006373076466843486, |
| "learning_rate": 2.4464194327834926e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 411544, |
| "step": 635 |
| }, |
| { |
| "epoch": 11.228070175438596, |
| "grad_norm": 0.0006263373652473092, |
| "learning_rate": 2.4081612342680694e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 414648, |
| "step": 640 |
| }, |
| { |
| "epoch": 11.31578947368421, |
| "grad_norm": 0.0015425410820171237, |
| "learning_rate": 2.369924561715569e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 418968, |
| "step": 645 |
| }, |
| { |
| "epoch": 11.403508771929825, |
| "grad_norm": 0.0012284928234294057, |
| "learning_rate": 2.3317183773674718e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 422584, |
| "step": 650 |
| }, |
| { |
| "epoch": 11.491228070175438, |
| "grad_norm": 0.004264664836227894, |
| "learning_rate": 2.2935516363191693e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 426136, |
| "step": 655 |
| }, |
| { |
| "epoch": 11.578947368421053, |
| "grad_norm": 0.00045499380212277174, |
| "learning_rate": 2.2554332844209904e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 429752, |
| "step": 660 |
| }, |
| { |
| "epoch": 11.666666666666666, |
| "grad_norm": 0.00040232963510788977, |
| "learning_rate": 2.2173722561813987e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 432920, |
| "step": 665 |
| }, |
| { |
| "epoch": 11.75438596491228, |
| "grad_norm": 0.0010527268750593066, |
| "learning_rate": 2.179377472672842e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 435608, |
| "step": 670 |
| }, |
| { |
| "epoch": 11.842105263157894, |
| "grad_norm": 0.00039451048360206187, |
| "learning_rate": 2.1414578394407597e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 438744, |
| "step": 675 |
| }, |
| { |
| "epoch": 11.929824561403509, |
| "grad_norm": 0.000518005050253123, |
| "learning_rate": 2.1036222444162147e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 442200, |
| "step": 680 |
| }, |
| { |
| "epoch": 12.0, |
| "eval_loss": 0.3128127455711365, |
| "eval_runtime": 0.5918, |
| "eval_samples_per_second": 42.245, |
| "eval_steps_per_second": 11.829, |
| "num_input_tokens_seen": 444728, |
| "step": 684 |
| }, |
| { |
| "epoch": 12.017543859649123, |
| "grad_norm": 0.0005355413886718452, |
| "learning_rate": 2.0658795558326743e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 445624, |
| "step": 685 |
| }, |
| { |
| "epoch": 12.105263157894736, |
| "grad_norm": 0.00044618433457799256, |
| "learning_rate": 2.0282386201473894e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 448632, |
| "step": 690 |
| }, |
| { |
| "epoch": 12.192982456140351, |
| "grad_norm": 0.0004590868193190545, |
| "learning_rate": 1.99070825996789e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 452248, |
| "step": 695 |
| }, |
| { |
| "epoch": 12.280701754385966, |
| "grad_norm": 0.0013518532505258918, |
| "learning_rate": 1.9532972719840607e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 455544, |
| "step": 700 |
| }, |
| { |
| "epoch": 12.368421052631579, |
| "grad_norm": 0.0004382475162856281, |
| "learning_rate": 1.9160144249063035e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 458520, |
| "step": 705 |
| }, |
| { |
| "epoch": 12.456140350877194, |
| "grad_norm": 0.0004602537374012172, |
| "learning_rate": 1.8788684574102467e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 462264, |
| "step": 710 |
| }, |
| { |
| "epoch": 12.543859649122806, |
| "grad_norm": 0.0002539714623708278, |
| "learning_rate": 1.8418680760885027e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 465624, |
| "step": 715 |
| }, |
| { |
| "epoch": 12.631578947368421, |
| "grad_norm": 0.0024354192428290844, |
| "learning_rate": 1.805021953409934e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 468536, |
| "step": 720 |
| }, |
| { |
| "epoch": 12.719298245614034, |
| "grad_norm": 0.00024564217892475426, |
| "learning_rate": 1.7683387256869353e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 472408, |
| "step": 725 |
| }, |
| { |
| "epoch": 12.807017543859649, |
| "grad_norm": 0.0003044725744985044, |
| "learning_rate": 1.7318269910511736e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 475416, |
| "step": 730 |
| }, |
| { |
| "epoch": 12.894736842105264, |
| "grad_norm": 0.0007393347332254052, |
| "learning_rate": 1.6954953074382863e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 478712, |
| "step": 735 |
| }, |
| { |
| "epoch": 12.982456140350877, |
| "grad_norm": 0.0002836023340933025, |
| "learning_rate": 1.659352190581993e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 481592, |
| "step": 740 |
| }, |
| { |
| "epoch": 13.0, |
| "eval_loss": 0.321302205324173, |
| "eval_runtime": 0.5961, |
| "eval_samples_per_second": 41.942, |
| "eval_steps_per_second": 11.744, |
| "num_input_tokens_seen": 481720, |
| "step": 741 |
| }, |
| { |
| "epoch": 13.070175438596491, |
| "grad_norm": 0.0005303695797920227, |
| "learning_rate": 1.6234061120181142e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 484856, |
| "step": 745 |
| }, |
| { |
| "epoch": 13.157894736842104, |
| "grad_norm": 0.0018196111777797341, |
| "learning_rate": 1.5876654970989308e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 487480, |
| "step": 750 |
| }, |
| { |
| "epoch": 13.24561403508772, |
| "grad_norm": 0.0003485916822683066, |
| "learning_rate": 1.552138723018382e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 490520, |
| "step": 755 |
| }, |
| { |
| "epoch": 13.333333333333334, |
| "grad_norm": 0.0011123067233711481, |
| "learning_rate": 1.5168341168485423e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 493976, |
| "step": 760 |
| }, |
| { |
| "epoch": 13.421052631578947, |
| "grad_norm": 0.00037717074155807495, |
| "learning_rate": 1.4817599535878565e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 497080, |
| "step": 765 |
| }, |
| { |
| "epoch": 13.508771929824562, |
| "grad_norm": 0.0003352680359967053, |
| "learning_rate": 1.4469244542215682e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 500792, |
| "step": 770 |
| }, |
| { |
| "epoch": 13.596491228070175, |
| "grad_norm": 0.0005829462315887213, |
| "learning_rate": 1.4123357837948175e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 503832, |
| "step": 775 |
| }, |
| { |
| "epoch": 13.68421052631579, |
| "grad_norm": 0.00036655354779213667, |
| "learning_rate": 1.3780020494988446e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 507256, |
| "step": 780 |
| }, |
| { |
| "epoch": 13.771929824561404, |
| "grad_norm": 0.0004805860517080873, |
| "learning_rate": 1.3439312987707615e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 510616, |
| "step": 785 |
| }, |
| { |
| "epoch": 13.859649122807017, |
| "grad_norm": 0.0031606510747224092, |
| "learning_rate": 1.3101315174073162e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 514200, |
| "step": 790 |
| }, |
| { |
| "epoch": 13.947368421052632, |
| "grad_norm": 0.0015623451909050345, |
| "learning_rate": 1.2766106276931223e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 517400, |
| "step": 795 |
| }, |
| { |
| "epoch": 14.0, |
| "eval_loss": 0.32733410596847534, |
| "eval_runtime": 0.5912, |
| "eval_samples_per_second": 42.288, |
| "eval_steps_per_second": 11.841, |
| "num_input_tokens_seen": 518664, |
| "step": 798 |
| }, |
| { |
| "epoch": 14.035087719298245, |
| "grad_norm": 0.00028240587562322617, |
| "learning_rate": 1.243376486543755e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 519848, |
| "step": 800 |
| }, |
| { |
| "epoch": 14.12280701754386, |
| "grad_norm": 0.0003585837548598647, |
| "learning_rate": 1.2104368836641908e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 523176, |
| "step": 805 |
| }, |
| { |
| "epoch": 14.210526315789474, |
| "grad_norm": 0.0005010432214476168, |
| "learning_rate": 1.1777995397229771e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 526280, |
| "step": 810 |
| }, |
| { |
| "epoch": 14.298245614035087, |
| "grad_norm": 0.0006202217773534358, |
| "learning_rate": 1.1454721045426073e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 529160, |
| "step": 815 |
| }, |
| { |
| "epoch": 14.385964912280702, |
| "grad_norm": 0.00036778871435672045, |
| "learning_rate": 1.113462155306478e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 533256, |
| "step": 820 |
| }, |
| { |
| "epoch": 14.473684210526315, |
| "grad_norm": 0.00103961571585387, |
| "learning_rate": 1.0817771947828934e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 536712, |
| "step": 825 |
| }, |
| { |
| "epoch": 14.56140350877193, |
| "grad_norm": 0.00032520951936021447, |
| "learning_rate": 1.0504246495664932e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 539912, |
| "step": 830 |
| }, |
| { |
| "epoch": 14.649122807017545, |
| "grad_norm": 0.00039382887189276516, |
| "learning_rate": 1.0194118683375503e-05, |
| "loss": 0.0, |
| "num_input_tokens_seen": 543336, |
| "step": 835 |
| }, |
| { |
| "epoch": 14.736842105263158, |
| "grad_norm": 0.00046222357195802033, |
| "learning_rate": 9.887461201395176e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 547496, |
| "step": 840 |
| }, |
| { |
| "epoch": 14.824561403508772, |
| "grad_norm": 0.00029964870191179216, |
| "learning_rate": 9.584345926752524e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 550280, |
| "step": 845 |
| }, |
| { |
| "epoch": 14.912280701754385, |
| "grad_norm": 0.002510607708245516, |
| "learning_rate": 9.284843906222948e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 553672, |
| "step": 850 |
| }, |
| { |
| "epoch": 15.0, |
| "grad_norm": 0.000540726410690695, |
| "learning_rate": 8.98902533967618e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 555728, |
| "step": 855 |
| }, |
| { |
| "epoch": 15.0, |
| "eval_loss": 0.3253510892391205, |
| "eval_runtime": 0.594, |
| "eval_samples_per_second": 42.088, |
| "eval_steps_per_second": 11.785, |
| "num_input_tokens_seen": 555728, |
| "step": 855 |
| }, |
| { |
| "epoch": 15.087719298245615, |
| "grad_norm": 0.0006719044758938253, |
| "learning_rate": 8.696959563622174e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 559312, |
| "step": 860 |
| }, |
| { |
| "epoch": 15.175438596491228, |
| "grad_norm": 0.00043321613338775933, |
| "learning_rate": 8.40871503495947e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 563056, |
| "step": 865 |
| }, |
| { |
| "epoch": 15.263157894736842, |
| "grad_norm": 0.0008227255311794579, |
| "learning_rate": 8.124359314929622e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 566224, |
| "step": 870 |
| }, |
| { |
| "epoch": 15.350877192982455, |
| "grad_norm": 0.00031539748306386173, |
| "learning_rate": 7.843959053281663e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 569776, |
| "step": 875 |
| }, |
| { |
| "epoch": 15.43859649122807, |
| "grad_norm": 0.0006076199933886528, |
| "learning_rate": 7.5675799726501155e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 573360, |
| "step": 880 |
| }, |
| { |
| "epoch": 15.526315789473685, |
| "grad_norm": 0.0012739731464534998, |
| "learning_rate": 7.295286853150391e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 576144, |
| "step": 885 |
| }, |
| { |
| "epoch": 15.614035087719298, |
| "grad_norm": 0.0013133555185049772, |
| "learning_rate": 7.027143517195023e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 579088, |
| "step": 890 |
| }, |
| { |
| "epoch": 15.701754385964913, |
| "grad_norm": 0.0015471307560801506, |
| "learning_rate": 6.763212814534484e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 582032, |
| "step": 895 |
| }, |
| { |
| "epoch": 15.789473684210526, |
| "grad_norm": 0.0004258696862962097, |
| "learning_rate": 6.503556607525838e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 585584, |
| "step": 900 |
| }, |
| { |
| "epoch": 15.87719298245614, |
| "grad_norm": 0.0004040314524900168, |
| "learning_rate": 6.248235756632984e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 588944, |
| "step": 905 |
| }, |
| { |
| "epoch": 15.964912280701755, |
| "grad_norm": 0.0003530940448399633, |
| "learning_rate": 5.997310106161589e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 592112, |
| "step": 910 |
| }, |
| { |
| "epoch": 16.0, |
| "eval_loss": 0.3247371017932892, |
| "eval_runtime": 0.5964, |
| "eval_samples_per_second": 41.919, |
| "eval_steps_per_second": 11.737, |
| "num_input_tokens_seen": 593096, |
| "step": 912 |
| }, |
| { |
| "epoch": 16.05263157894737, |
| "grad_norm": 0.001161749241873622, |
| "learning_rate": 5.7508384702323226e-06, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 594440, |
| "step": 915 |
| }, |
| { |
| "epoch": 16.140350877192983, |
| "grad_norm": 0.0003777670208364725, |
| "learning_rate": 5.508878618995439e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 598440, |
| "step": 920 |
| }, |
| { |
| "epoch": 16.228070175438596, |
| "grad_norm": 0.0024024529848247766, |
| "learning_rate": 5.271487265090163e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 602152, |
| "step": 925 |
| }, |
| { |
| "epoch": 16.31578947368421, |
| "grad_norm": 0.00035852554719895124, |
| "learning_rate": 5.038720050351842e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 605736, |
| "step": 930 |
| }, |
| { |
| "epoch": 16.403508771929825, |
| "grad_norm": 0.0004025466041639447, |
| "learning_rate": 4.810631532770182e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 608520, |
| "step": 935 |
| }, |
| { |
| "epoch": 16.49122807017544, |
| "grad_norm": 0.00032559980172663927, |
| "learning_rate": 4.587275173701428e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 612040, |
| "step": 940 |
| }, |
| { |
| "epoch": 16.57894736842105, |
| "grad_norm": 0.0004513366729952395, |
| "learning_rate": 4.368703325337667e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 614920, |
| "step": 945 |
| }, |
| { |
| "epoch": 16.666666666666668, |
| "grad_norm": 0.00046209024731069803, |
| "learning_rate": 4.154967218436037e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 618440, |
| "step": 950 |
| }, |
| { |
| "epoch": 16.75438596491228, |
| "grad_norm": 0.00048692928976379335, |
| "learning_rate": 3.94611695031086e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 621320, |
| "step": 955 |
| }, |
| { |
| "epoch": 16.842105263157894, |
| "grad_norm": 0.0005620087031275034, |
| "learning_rate": 3.74220147309135e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 624648, |
| "step": 960 |
| }, |
| { |
| "epoch": 16.92982456140351, |
| "grad_norm": 0.0005436613573692739, |
| "learning_rate": 3.543268582247844e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 628232, |
| "step": 965 |
| }, |
| { |
| "epoch": 17.0, |
| "eval_loss": 0.3292401134967804, |
| "eval_runtime": 0.591, |
| "eval_samples_per_second": 42.301, |
| "eval_steps_per_second": 11.844, |
| "num_input_tokens_seen": 629760, |
| "step": 969 |
| }, |
| { |
| "epoch": 17.017543859649123, |
| "grad_norm": 0.0009161728667095304, |
| "learning_rate": 3.3493649053890326e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 630528, |
| "step": 970 |
| }, |
| { |
| "epoch": 17.105263157894736, |
| "grad_norm": 0.0002899458340834826, |
| "learning_rate": 3.1605358913330385e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 633600, |
| "step": 975 |
| }, |
| { |
| "epoch": 17.19298245614035, |
| "grad_norm": 0.0006383854779414833, |
| "learning_rate": 2.9768257994546662e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 637024, |
| "step": 980 |
| }, |
| { |
| "epoch": 17.280701754385966, |
| "grad_norm": 0.0002234369021607563, |
| "learning_rate": 2.7982776893115627e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 640160, |
| "step": 985 |
| }, |
| { |
| "epoch": 17.36842105263158, |
| "grad_norm": 0.0005948686157353222, |
| "learning_rate": 2.624933410551508e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 643040, |
| "step": 990 |
| }, |
| { |
| "epoch": 17.45614035087719, |
| "grad_norm": 0.000273498852038756, |
| "learning_rate": 2.456833593103361e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 646336, |
| "step": 995 |
| }, |
| { |
| "epoch": 17.54385964912281, |
| "grad_norm": 0.0006787853199057281, |
| "learning_rate": 2.2940176376538445e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 649984, |
| "step": 1000 |
| }, |
| { |
| "epoch": 17.63157894736842, |
| "grad_norm": 0.006714384537190199, |
| "learning_rate": 2.136523706412477e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 653632, |
| "step": 1005 |
| }, |
| { |
| "epoch": 17.719298245614034, |
| "grad_norm": 0.00285444688051939, |
| "learning_rate": 1.984388714166799e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 656960, |
| "step": 1010 |
| }, |
| { |
| "epoch": 17.80701754385965, |
| "grad_norm": 0.00030963635072112083, |
| "learning_rate": 1.837648319629956e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 659840, |
| "step": 1015 |
| }, |
| { |
| "epoch": 17.894736842105264, |
| "grad_norm": 0.00046600456698797643, |
| "learning_rate": 1.6963369170826943e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 663872, |
| "step": 1020 |
| }, |
| { |
| "epoch": 17.982456140350877, |
| "grad_norm": 0.0007396559813059866, |
| "learning_rate": 1.5604876283117326e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 667264, |
| "step": 1025 |
| }, |
| { |
| "epoch": 18.0, |
| "eval_loss": 0.3255113959312439, |
| "eval_runtime": 0.5941, |
| "eval_samples_per_second": 42.083, |
| "eval_steps_per_second": 11.783, |
| "num_input_tokens_seen": 667432, |
| "step": 1026 |
| }, |
| { |
| "epoch": 18.07017543859649, |
| "grad_norm": 0.00033738979254849255, |
| "learning_rate": 1.4301322948464147e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 669544, |
| "step": 1030 |
| }, |
| { |
| "epoch": 18.157894736842106, |
| "grad_norm": 0.0004050004936289042, |
| "learning_rate": 1.3053014704953987e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 672776, |
| "step": 1035 |
| }, |
| { |
| "epoch": 18.24561403508772, |
| "grad_norm": 0.000312207528622821, |
| "learning_rate": 1.1860244141851773e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 676008, |
| "step": 1040 |
| }, |
| { |
| "epoch": 18.333333333333332, |
| "grad_norm": 0.0004885089583694935, |
| "learning_rate": 1.0723290831021471e-06, |
| "loss": 0.0, |
| "num_input_tokens_seen": 679848, |
| "step": 1045 |
| }, |
| { |
| "epoch": 18.42105263157895, |
| "grad_norm": 0.0005419268272817135, |
| "learning_rate": 9.642421261397472e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 683144, |
| "step": 1050 |
| }, |
| { |
| "epoch": 18.50877192982456, |
| "grad_norm": 0.0003033233806490898, |
| "learning_rate": 8.617888776522642e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 686280, |
| "step": 1055 |
| }, |
| { |
| "epoch": 18.596491228070175, |
| "grad_norm": 0.00034189768484793603, |
| "learning_rate": 7.649933515167407e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 689480, |
| "step": 1060 |
| }, |
| { |
| "epoch": 18.68421052631579, |
| "grad_norm": 0.005335874855518341, |
| "learning_rate": 6.738782355044049e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 692296, |
| "step": 1065 |
| }, |
| { |
| "epoch": 18.771929824561404, |
| "grad_norm": 0.000625142827630043, |
| "learning_rate": 5.88464885962911e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 695912, |
| "step": 1070 |
| }, |
| { |
| "epoch": 18.859649122807017, |
| "grad_norm": 0.00026616669492796063, |
| "learning_rate": 5.087733228106517e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 699432, |
| "step": 1075 |
| }, |
| { |
| "epoch": 18.94736842105263, |
| "grad_norm": 0.001974803861230612, |
| "learning_rate": 4.3482222484432513e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 703080, |
| "step": 1080 |
| }, |
| { |
| "epoch": 19.0, |
| "eval_loss": 0.3304460644721985, |
| "eval_runtime": 0.5906, |
| "eval_samples_per_second": 42.333, |
| "eval_steps_per_second": 11.853, |
| "num_input_tokens_seen": 704816, |
| "step": 1083 |
| }, |
| { |
| "epoch": 19.035087719298247, |
| "grad_norm": 0.0020954215433448553, |
| "learning_rate": 3.666289253608235e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 706192, |
| "step": 1085 |
| }, |
| { |
| "epoch": 19.12280701754386, |
| "grad_norm": 0.0006088721565902233, |
| "learning_rate": 3.0420940809451624e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 709072, |
| "step": 1090 |
| }, |
| { |
| "epoch": 19.210526315789473, |
| "grad_norm": 0.00034122963552363217, |
| "learning_rate": 2.47578303470844e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 712496, |
| "step": 1095 |
| }, |
| { |
| "epoch": 19.29824561403509, |
| "grad_norm": 0.00031280185794457793, |
| "learning_rate": 1.96748885177106e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 715056, |
| "step": 1100 |
| }, |
| { |
| "epoch": 19.385964912280702, |
| "grad_norm": 0.0002845152048394084, |
| "learning_rate": 1.517330670512629e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 718416, |
| "step": 1105 |
| }, |
| { |
| "epoch": 19.473684210526315, |
| "grad_norm": 0.006639602594077587, |
| "learning_rate": 1.125414002894759e-07, |
| "loss": 0.0, |
| "num_input_tokens_seen": 721744, |
| "step": 1110 |
| }, |
| { |
| "epoch": 19.56140350877193, |
| "grad_norm": 0.00038960896199569106, |
| "learning_rate": 7.918307097301014e-08, |
| "loss": 0.0, |
| "num_input_tokens_seen": 725328, |
| "step": 1115 |
| }, |
| { |
| "epoch": 19.649122807017545, |
| "grad_norm": 0.00028305064188316464, |
| "learning_rate": 5.166589791513465e-08, |
| "loss": 0.0, |
| "num_input_tokens_seen": 728976, |
| "step": 1120 |
| }, |
| { |
| "epoch": 19.736842105263158, |
| "grad_norm": 0.003096348373219371, |
| "learning_rate": 2.999633082847453e-08, |
| "loss": 0.0, |
| "num_input_tokens_seen": 732368, |
| "step": 1125 |
| }, |
| { |
| "epoch": 19.82456140350877, |
| "grad_norm": 0.0003074291453231126, |
| "learning_rate": 1.4179448813278484e-08, |
| "loss": 0.0, |
| "num_input_tokens_seen": 735440, |
| "step": 1130 |
| }, |
| { |
| "epoch": 19.912280701754387, |
| "grad_norm": 0.0006019927677698433, |
| "learning_rate": 4.218959166932268e-09, |
| "loss": 0.0, |
| "num_input_tokens_seen": 739120, |
| "step": 1135 |
| }, |
| { |
| "epoch": 20.0, |
| "grad_norm": 0.0002585648908279836, |
| "learning_rate": 1.1719651499819683e-10, |
| "loss": 0.0, |
| "num_input_tokens_seen": 742296, |
| "step": 1140 |
| }, |
| { |
| "epoch": 20.0, |
| "eval_loss": 0.3307504951953888, |
| "eval_runtime": 0.5933, |
| "eval_samples_per_second": 42.134, |
| "eval_steps_per_second": 11.798, |
| "num_input_tokens_seen": 742296, |
| "step": 1140 |
| }, |
| { |
| "epoch": 20.0, |
| "num_input_tokens_seen": 742296, |
| "step": 1140, |
| "total_flos": 3.3490635102683136e+16, |
| "train_loss": 0.04608795025587346, |
| "train_runtime": 338.2927, |
| "train_samples_per_second": 13.302, |
| "train_steps_per_second": 3.37 |
| } |
| ], |
| "logging_steps": 5, |
| "max_steps": 1140, |
| "num_input_tokens_seen": 742296, |
| "num_train_epochs": 20, |
| "save_steps": 57, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 3.3490635102683136e+16, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |