{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 46.15384615384615,
  "eval_steps": 50,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07692307692307693,
      "eval_loss": 10.376392364501953,
      "eval_runtime": 5.3202,
      "eval_samples_per_second": 282.131,
      "eval_steps_per_second": 4.511,
      "step": 1
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 0.09619140625,
      "learning_rate": 6.666666666666667e-05,
      "loss": 10.378,
      "step": 10
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.09716796875,
      "learning_rate": 0.00013333333333333334,
      "loss": 10.3754,
      "step": 20
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 0.11279296875,
      "learning_rate": 0.0002,
      "loss": 10.3683,
      "step": 30
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 0.201171875,
      "learning_rate": 0.00019984815164333163,
      "loss": 10.3522,
      "step": 40
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 0.3203125,
      "learning_rate": 0.00019939306773179497,
      "loss": 10.3159,
      "step": 50
    },
    {
      "epoch": 3.8461538461538463,
      "eval_loss": 10.285228729248047,
      "eval_runtime": 5.2726,
      "eval_samples_per_second": 284.681,
      "eval_steps_per_second": 4.552,
      "step": 50
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 0.32421875,
      "learning_rate": 0.00019863613034027224,
      "loss": 10.2518,
      "step": 60
    },
    {
      "epoch": 5.384615384615385,
      "grad_norm": 0.326171875,
      "learning_rate": 0.00019757963826274357,
      "loss": 10.1828,
      "step": 70
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 0.330078125,
      "learning_rate": 0.00019622680003092503,
      "loss": 10.1177,
      "step": 80
    },
    {
      "epoch": 6.923076923076923,
      "grad_norm": 0.33203125,
      "learning_rate": 0.00019458172417006347,
      "loss": 10.0565,
      "step": 90
    },
    {
      "epoch": 7.6923076923076925,
      "grad_norm": 0.341796875,
      "learning_rate": 0.00019264940672148018,
      "loss": 9.998,
      "step": 100
    },
    {
      "epoch": 7.6923076923076925,
      "eval_loss": 9.973793029785156,
      "eval_runtime": 5.2822,
      "eval_samples_per_second": 284.164,
      "eval_steps_per_second": 4.544,
      "step": 100
    },
    {
      "epoch": 8.461538461538462,
      "grad_norm": 0.345703125,
      "learning_rate": 0.00019043571606975777,
      "loss": 9.942,
      "step": 110
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 0.34765625,
      "learning_rate": 0.0001879473751206489,
      "loss": 9.8887,
      "step": 120
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.349609375,
      "learning_rate": 0.00018519194088383273,
      "loss": 9.836,
      "step": 130
    },
    {
      "epoch": 10.76923076923077,
      "grad_norm": 0.357421875,
      "learning_rate": 0.0001821777815225245,
      "loss": 9.7845,
      "step": 140
    },
    {
      "epoch": 11.538461538461538,
      "grad_norm": 0.361328125,
      "learning_rate": 0.00017891405093963938,
      "loss": 9.7359,
      "step": 150
    },
    {
      "epoch": 11.538461538461538,
      "eval_loss": 9.719038963317871,
      "eval_runtime": 5.2736,
      "eval_samples_per_second": 284.626,
      "eval_steps_per_second": 4.551,
      "step": 150
    },
    {
      "epoch": 12.307692307692308,
      "grad_norm": 0.369140625,
      "learning_rate": 0.00017541066097768963,
      "loss": 9.688,
      "step": 160
    },
    {
      "epoch": 13.076923076923077,
      "grad_norm": 0.3671875,
      "learning_rate": 0.00017167825131684513,
      "loss": 9.6416,
      "step": 170
    },
    {
      "epoch": 13.846153846153847,
      "grad_norm": 0.375,
      "learning_rate": 0.00016772815716257412,
      "loss": 9.597,
      "step": 180
    },
    {
      "epoch": 14.615384615384615,
      "grad_norm": 0.380859375,
      "learning_rate": 0.00016357237482099684,
      "loss": 9.5534,
      "step": 190
    },
    {
      "epoch": 15.384615384615385,
      "grad_norm": 0.37890625,
      "learning_rate": 0.00015922352526649803,
      "loss": 9.5151,
      "step": 200
    },
    {
      "epoch": 15.384615384615385,
      "eval_loss": 9.504176139831543,
      "eval_runtime": 5.2799,
      "eval_samples_per_second": 284.283,
      "eval_steps_per_second": 4.545,
      "step": 200
    },
    {
      "epoch": 16.153846153846153,
      "grad_norm": 0.3828125,
      "learning_rate": 0.00015469481581224272,
      "loss": 9.4734,
      "step": 210
    },
    {
      "epoch": 16.923076923076923,
      "grad_norm": 0.388671875,
      "learning_rate": 0.00015000000000000001,
      "loss": 9.4381,
      "step": 220
    },
    {
      "epoch": 17.692307692307693,
      "grad_norm": 0.39453125,
      "learning_rate": 0.00014515333583108896,
      "loss": 9.4021,
      "step": 230
    },
    {
      "epoch": 18.46153846153846,
      "grad_norm": 0.396484375,
      "learning_rate": 0.00014016954246529696,
      "loss": 9.3725,
      "step": 240
    },
    {
      "epoch": 19.23076923076923,
      "grad_norm": 0.396484375,
      "learning_rate": 0.00013506375551927547,
      "loss": 9.3407,
      "step": 250
    },
    {
      "epoch": 19.23076923076923,
      "eval_loss": 9.341134071350098,
      "eval_runtime": 5.9586,
      "eval_samples_per_second": 251.904,
      "eval_steps_per_second": 4.028,
      "step": 250
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.400390625,
      "learning_rate": 0.00012985148110016947,
      "loss": 9.3153,
      "step": 260
    },
    {
      "epoch": 20.76923076923077,
      "grad_norm": 0.40234375,
      "learning_rate": 0.00012454854871407994,
      "loss": 9.2905,
      "step": 270
    },
    {
      "epoch": 21.53846153846154,
      "grad_norm": 0.408203125,
      "learning_rate": 0.00011917106319237386,
      "loss": 9.2678,
      "step": 280
    },
    {
      "epoch": 22.307692307692307,
      "grad_norm": 0.404296875,
      "learning_rate": 0.00011373535578184082,
      "loss": 9.2496,
      "step": 290
    },
    {
      "epoch": 23.076923076923077,
      "grad_norm": 0.40625,
      "learning_rate": 0.00010825793454723325,
      "loss": 9.2338,
      "step": 300
    },
    {
      "epoch": 23.076923076923077,
      "eval_loss": 9.241479873657227,
      "eval_runtime": 5.2815,
      "eval_samples_per_second": 284.199,
      "eval_steps_per_second": 4.544,
      "step": 300
    },
    {
      "epoch": 23.846153846153847,
      "grad_norm": 0.412109375,
      "learning_rate": 0.00010275543423681621,
      "loss": 9.2185,
      "step": 310
    },
    {
      "epoch": 24.615384615384617,
      "grad_norm": 0.4140625,
      "learning_rate": 9.724456576318381e-05,
      "loss": 9.2101,
      "step": 320
    },
    {
      "epoch": 25.384615384615383,
      "grad_norm": 0.412109375,
      "learning_rate": 9.174206545276677e-05,
      "loss": 9.2002,
      "step": 330
    },
    {
      "epoch": 26.153846153846153,
      "grad_norm": 0.416015625,
      "learning_rate": 8.626464421815919e-05,
      "loss": 9.193,
      "step": 340
    },
    {
      "epoch": 26.923076923076923,
      "grad_norm": 0.41015625,
      "learning_rate": 8.082893680762619e-05,
      "loss": 9.1896,
      "step": 350
    },
    {
      "epoch": 26.923076923076923,
      "eval_loss": 9.20389461517334,
      "eval_runtime": 5.2643,
      "eval_samples_per_second": 285.129,
      "eval_steps_per_second": 4.559,
      "step": 350
    },
    {
      "epoch": 27.692307692307693,
      "grad_norm": 0.416015625,
      "learning_rate": 7.54514512859201e-05,
      "loss": 9.186,
      "step": 360
    },
    {
      "epoch": 28.46153846153846,
      "grad_norm": 0.412109375,
      "learning_rate": 7.014851889983057e-05,
      "loss": 9.182,
      "step": 370
    },
    {
      "epoch": 29.23076923076923,
      "grad_norm": 0.416015625,
      "learning_rate": 6.493624448072457e-05,
      "loss": 9.1823,
      "step": 380
    },
    {
      "epoch": 30.0,
      "grad_norm": 0.4140625,
      "learning_rate": 5.983045753470308e-05,
      "loss": 9.1806,
      "step": 390
    },
    {
      "epoch": 30.76923076923077,
      "grad_norm": 0.419921875,
      "learning_rate": 5.484666416891109e-05,
      "loss": 9.18,
      "step": 400
    },
    {
      "epoch": 30.76923076923077,
      "eval_loss": 9.195965766906738,
      "eval_runtime": 5.2708,
      "eval_samples_per_second": 284.775,
      "eval_steps_per_second": 4.553,
      "step": 400
    },
    {
      "epoch": 31.53846153846154,
      "grad_norm": 0.4140625,
      "learning_rate": 5.000000000000002e-05,
      "loss": 9.1787,
      "step": 410
    },
    {
      "epoch": 32.30769230769231,
      "grad_norm": 0.419921875,
      "learning_rate": 4.530518418775733e-05,
      "loss": 9.1782,
      "step": 420
    },
    {
      "epoch": 33.07692307692308,
      "grad_norm": 0.41796875,
      "learning_rate": 4.077647473350201e-05,
      "loss": 9.1776,
      "step": 430
    },
    {
      "epoch": 33.84615384615385,
      "grad_norm": 0.416015625,
      "learning_rate": 3.642762517900322e-05,
      "loss": 9.1787,
      "step": 440
    },
    {
      "epoch": 34.61538461538461,
      "grad_norm": 0.416015625,
      "learning_rate": 3.227184283742591e-05,
      "loss": 9.1777,
      "step": 450
    },
    {
      "epoch": 34.61538461538461,
      "eval_loss": 9.195714950561523,
      "eval_runtime": 5.2816,
      "eval_samples_per_second": 284.194,
      "eval_steps_per_second": 4.544,
      "step": 450
    },
    {
      "epoch": 35.38461538461539,
      "grad_norm": 0.4140625,
      "learning_rate": 2.8321748683154893e-05,
      "loss": 9.1782,
      "step": 460
    },
    {
      "epoch": 36.15384615384615,
      "grad_norm": 0.419921875,
      "learning_rate": 2.4589339022310386e-05,
      "loss": 9.1783,
      "step": 470
    },
    {
      "epoch": 36.92307692307692,
      "grad_norm": 0.416015625,
      "learning_rate": 2.1085949060360654e-05,
      "loss": 9.1769,
      "step": 480
    },
    {
      "epoch": 37.69230769230769,
      "grad_norm": 0.4140625,
      "learning_rate": 1.7822218477475494e-05,
      "loss": 9.1779,
      "step": 490
    },
    {
      "epoch": 38.46153846153846,
      "grad_norm": 0.419921875,
      "learning_rate": 1.4808059116167305e-05,
      "loss": 9.1781,
      "step": 500
    },
    {
      "epoch": 38.46153846153846,
      "eval_loss": 9.193068504333496,
      "eval_runtime": 5.6598,
      "eval_samples_per_second": 265.206,
      "eval_steps_per_second": 4.24,
      "step": 500
    },
    {
      "epoch": 39.23076923076923,
      "grad_norm": 0.419921875,
      "learning_rate": 1.2052624879351104e-05,
      "loss": 9.1771,
      "step": 510
    },
    {
      "epoch": 40.0,
      "grad_norm": 0.41796875,
      "learning_rate": 9.564283930242257e-06,
      "loss": 9.1775,
      "step": 520
    },
    {
      "epoch": 40.76923076923077,
      "grad_norm": 0.416015625,
      "learning_rate": 7.350593278519824e-06,
      "loss": 9.1773,
      "step": 530
    },
    {
      "epoch": 41.53846153846154,
      "grad_norm": 0.41796875,
      "learning_rate": 5.418275829936537e-06,
      "loss": 9.1787,
      "step": 540
    },
    {
      "epoch": 42.30769230769231,
      "grad_norm": 0.41796875,
      "learning_rate": 3.7731999690749585e-06,
      "loss": 9.1761,
      "step": 550
    },
    {
      "epoch": 42.30769230769231,
      "eval_loss": 9.19363784790039,
      "eval_runtime": 5.2646,
      "eval_samples_per_second": 285.113,
      "eval_steps_per_second": 4.559,
      "step": 550
    },
    {
      "epoch": 43.07692307692308,
      "grad_norm": 0.4140625,
      "learning_rate": 2.420361737256438e-06,
      "loss": 9.1784,
      "step": 560
    },
    {
      "epoch": 43.84615384615385,
      "grad_norm": 0.41796875,
      "learning_rate": 1.3638696597277679e-06,
      "loss": 9.1777,
      "step": 570
    },
    {
      "epoch": 44.61538461538461,
      "grad_norm": 0.419921875,
      "learning_rate": 6.069322682050516e-07,
      "loss": 9.1766,
      "step": 580
    },
    {
      "epoch": 45.38461538461539,
      "grad_norm": 0.412109375,
      "learning_rate": 1.518483566683826e-07,
      "loss": 9.1785,
      "step": 590
    },
    {
      "epoch": 46.15384615384615,
      "grad_norm": 0.419921875,
      "learning_rate": 0.0,
      "loss": 9.1762,
      "step": 600
    },
    {
      "epoch": 46.15384615384615,
      "eval_loss": 9.194318771362305,
      "eval_runtime": 5.2604,
      "eval_samples_per_second": 285.34,
      "eval_steps_per_second": 4.562,
      "step": 600
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 47,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 245495129702400.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}