{
  "best_global_step": 6300,
  "best_metric": 3.5436816215515137,
  "best_model_checkpoint": "./checkpoints/lstm_h1024_l3_d0.1/checkpoint-6300",
  "epoch": 0.63,
  "eval_steps": 50,
  "global_step": 6300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001,
      "grad_norm": 0.3549754023551941,
      "learning_rate": 1.8e-06,
      "loss": 60.8128,
      "step": 10
    },
    {
      "epoch": 0.002,
      "grad_norm": 0.3771856427192688,
      "learning_rate": 3.8e-06,
      "loss": 60.8074,
      "step": 20
    },
    {
      "epoch": 0.003,
      "grad_norm": 0.38984280824661255,
      "learning_rate": 5.8e-06,
      "loss": 60.7973,
      "step": 30
    },
    {
      "epoch": 0.004,
      "grad_norm": 0.3642944097518921,
      "learning_rate": 7.8e-06,
      "loss": 60.786,
      "step": 40
    },
    {
      "epoch": 0.005,
      "grad_norm": 0.38839685916900635,
      "learning_rate": 9.800000000000001e-06,
      "loss": 60.7688,
      "step": 50
    },
    {
      "epoch": 0.005,
      "eval_loss": 15.188536643981934,
      "eval_runtime": 6.2292,
      "eval_samples_per_second": 321.071,
      "eval_steps_per_second": 20.067,
      "step": 50
    },
    {
      "epoch": 0.006,
      "grad_norm": 0.4147929549217224,
      "learning_rate": 1.18e-05,
      "loss": 60.7432,
      "step": 60
    },
    {
      "epoch": 0.007,
      "grad_norm": 0.4341050088405609,
      "learning_rate": 1.3800000000000002e-05,
      "loss": 60.7053,
      "step": 70
    },
    {
      "epoch": 0.008,
      "grad_norm": 0.5660704374313354,
      "learning_rate": 1.58e-05,
      "loss": 60.6374,
      "step": 80
    },
    {
      "epoch": 0.009,
      "grad_norm": 1.0963184833526611,
      "learning_rate": 1.78e-05,
      "loss": 60.4508,
      "step": 90
    },
    {
      "epoch": 0.01,
      "grad_norm": 3.7994534969329834,
      "learning_rate": 1.9800000000000004e-05,
      "loss": 59.5999,
      "step": 100
    },
    {
      "epoch": 0.01,
      "eval_loss": 14.58733081817627,
      "eval_runtime": 6.1941,
      "eval_samples_per_second": 322.887,
      "eval_steps_per_second": 20.18,
      "step": 100
    },
    {
      "epoch": 0.011,
      "grad_norm": 5.019301414489746,
      "learning_rate": 2.18e-05,
      "loss": 57.248,
      "step": 110
    },
    {
      "epoch": 0.012,
      "grad_norm": 2.526005744934082,
      "learning_rate": 2.38e-05,
      "loss": 55.7676,
      "step": 120
    },
    {
      "epoch": 0.013,
      "grad_norm": 2.1965389251708984,
      "learning_rate": 2.58e-05,
      "loss": 55.3816,
      "step": 130
    },
    {
      "epoch": 0.014,
      "grad_norm": 1.439666748046875,
      "learning_rate": 2.7800000000000005e-05,
      "loss": 54.9717,
      "step": 140
    },
    {
      "epoch": 0.015,
      "grad_norm": 1.7099782228469849,
      "learning_rate": 2.98e-05,
      "loss": 54.954,
      "step": 150
    },
    {
      "epoch": 0.015,
      "eval_loss": 13.733558654785156,
      "eval_runtime": 6.2158,
      "eval_samples_per_second": 321.758,
      "eval_steps_per_second": 20.11,
      "step": 150
    },
    {
      "epoch": 0.016,
      "grad_norm": 1.8854618072509766,
      "learning_rate": 3.18e-05,
      "loss": 54.5982,
      "step": 160
    },
    {
      "epoch": 0.017,
      "grad_norm": 2.250976085662842,
      "learning_rate": 3.38e-05,
      "loss": 54.526,
      "step": 170
    },
    {
      "epoch": 0.018,
      "grad_norm": 1.5891501903533936,
      "learning_rate": 3.58e-05,
      "loss": 54.5763,
      "step": 180
    },
    {
      "epoch": 0.019,
      "grad_norm": 1.5920642614364624,
      "learning_rate": 3.7800000000000004e-05,
      "loss": 54.4684,
      "step": 190
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.5550366640090942,
      "learning_rate": 3.9800000000000005e-05,
      "loss": 54.3639,
      "step": 200
    },
    {
      "epoch": 0.02,
      "eval_loss": 13.6970796585083,
      "eval_runtime": 6.2966,
      "eval_samples_per_second": 317.634,
      "eval_steps_per_second": 19.852,
      "step": 200
    },
    {
      "epoch": 0.021,
      "grad_norm": 2.051086187362671,
      "learning_rate": 4.18e-05,
      "loss": 54.3304,
      "step": 210
    },
    {
      "epoch": 0.022,
      "grad_norm": 1.848323106765747,
      "learning_rate": 4.38e-05,
      "loss": 54.744,
      "step": 220
    },
    {
      "epoch": 0.023,
      "grad_norm": 1.9148523807525635,
      "learning_rate": 4.58e-05,
      "loss": 54.6018,
      "step": 230
    },
    {
      "epoch": 0.024,
      "grad_norm": 3.823683500289917,
      "learning_rate": 4.78e-05,
      "loss": 54.7114,
      "step": 240
    },
    {
      "epoch": 0.025,
      "grad_norm": 2.885927438735962,
      "learning_rate": 4.9800000000000004e-05,
      "loss": 54.6399,
      "step": 250
    },
    {
      "epoch": 0.025,
      "eval_loss": 13.650457382202148,
      "eval_runtime": 6.1864,
      "eval_samples_per_second": 323.289,
      "eval_steps_per_second": 20.206,
      "step": 250
    },
    {
      "epoch": 0.026,
      "grad_norm": 4.444192886352539,
      "learning_rate": 5.1800000000000005e-05,
      "loss": 54.3902,
      "step": 260
    },
    {
      "epoch": 0.027,
      "grad_norm": 7.780248641967773,
      "learning_rate": 5.380000000000001e-05,
      "loss": 54.1046,
      "step": 270
    },
    {
      "epoch": 0.028,
      "grad_norm": 2.7638983726501465,
      "learning_rate": 5.580000000000001e-05,
      "loss": 54.0532,
      "step": 280
    },
    {
      "epoch": 0.029,
      "grad_norm": 2.145719051361084,
      "learning_rate": 5.7799999999999995e-05,
      "loss": 53.8505,
      "step": 290
    },
    {
      "epoch": 0.03,
      "grad_norm": 2.996760606765747,
      "learning_rate": 5.9800000000000003e-05,
      "loss": 53.6261,
      "step": 300
    },
    {
      "epoch": 0.03,
      "eval_loss": 13.41512680053711,
      "eval_runtime": 6.2516,
      "eval_samples_per_second": 319.917,
      "eval_steps_per_second": 19.995,
      "step": 300
    },
    {
      "epoch": 0.031,
      "grad_norm": 2.4033031463623047,
      "learning_rate": 6.18e-05,
      "loss": 53.2967,
      "step": 310
    },
    {
      "epoch": 0.032,
      "grad_norm": 3.834418773651123,
      "learning_rate": 6.38e-05,
      "loss": 52.9328,
      "step": 320
    },
    {
      "epoch": 0.033,
      "grad_norm": 6.866483211517334,
      "learning_rate": 6.58e-05,
      "loss": 52.3613,
      "step": 330
    },
    {
      "epoch": 0.034,
      "grad_norm": 3.098707675933838,
      "learning_rate": 6.780000000000001e-05,
      "loss": 52.4274,
      "step": 340
    },
    {
      "epoch": 0.035,
      "grad_norm": 9.082242012023926,
      "learning_rate": 6.98e-05,
      "loss": 52.2897,
      "step": 350
    },
    {
      "epoch": 0.035,
      "eval_loss": 13.071863174438477,
      "eval_runtime": 6.1905,
      "eval_samples_per_second": 323.074,
      "eval_steps_per_second": 20.192,
      "step": 350
    },
    {
      "epoch": 0.036,
      "grad_norm": 2.5560662746429443,
      "learning_rate": 7.18e-05,
      "loss": 52.0423,
      "step": 360
    },
    {
      "epoch": 0.037,
      "grad_norm": 2.2769477367401123,
      "learning_rate": 7.38e-05,
      "loss": 51.3791,
      "step": 370
    },
    {
      "epoch": 0.038,
      "grad_norm": 2.816531181335449,
      "learning_rate": 7.58e-05,
      "loss": 51.0433,
      "step": 380
    },
    {
      "epoch": 0.039,
      "grad_norm": 4.625880718231201,
      "learning_rate": 7.780000000000001e-05,
      "loss": 49.9025,
      "step": 390
    },
    {
      "epoch": 0.04,
      "grad_norm": 3.8817570209503174,
      "learning_rate": 7.98e-05,
      "loss": 49.2651,
      "step": 400
    },
    {
      "epoch": 0.04,
      "eval_loss": 12.226531982421875,
      "eval_runtime": 6.218,
      "eval_samples_per_second": 321.647,
      "eval_steps_per_second": 20.103,
      "step": 400
    },
    {
      "epoch": 0.041,
      "grad_norm": 2.100292205810547,
      "learning_rate": 8.18e-05,
      "loss": 48.4186,
      "step": 410
    },
    {
      "epoch": 0.042,
      "grad_norm": 2.875107765197754,
      "learning_rate": 8.38e-05,
      "loss": 47.5267,
      "step": 420
    },
    {
      "epoch": 0.043,
      "grad_norm": 3.6629738807678223,
      "learning_rate": 8.58e-05,
      "loss": 46.0668,
      "step": 430
    },
    {
      "epoch": 0.044,
      "grad_norm": 6.750997543334961,
      "learning_rate": 8.78e-05,
      "loss": 45.6252,
      "step": 440
    },
    {
      "epoch": 0.045,
      "grad_norm": 4.621273040771484,
      "learning_rate": 8.98e-05,
      "loss": 44.6924,
      "step": 450
    },
    {
      "epoch": 0.045,
      "eval_loss": 11.125019073486328,
      "eval_runtime": 6.1835,
      "eval_samples_per_second": 323.442,
      "eval_steps_per_second": 20.215,
      "step": 450
    },
    {
      "epoch": 0.046,
      "grad_norm": 7.5620856285095215,
      "learning_rate": 9.180000000000001e-05,
      "loss": 43.9264,
      "step": 460
    },
    {
      "epoch": 0.047,
      "grad_norm": 4.797828197479248,
      "learning_rate": 9.38e-05,
      "loss": 43.1773,
      "step": 470
    },
    {
      "epoch": 0.048,
      "grad_norm": 6.183513641357422,
      "learning_rate": 9.58e-05,
      "loss": 42.4152,
      "step": 480
    },
    {
      "epoch": 0.049,
      "grad_norm": 2.3638603687286377,
      "learning_rate": 9.78e-05,
      "loss": 41.8333,
      "step": 490
    },
    {
      "epoch": 0.05,
      "grad_norm": 3.4479477405548096,
      "learning_rate": 9.98e-05,
      "loss": 41.2254,
      "step": 500
    },
    {
      "epoch": 0.05,
      "eval_loss": 10.2489013671875,
      "eval_runtime": 6.1973,
      "eval_samples_per_second": 322.722,
      "eval_steps_per_second": 20.17,
      "step": 500
    },
    {
      "epoch": 0.051,
      "grad_norm": 3.256962537765503,
      "learning_rate": 9.911197057469107e-05,
      "loss": 40.7175,
      "step": 510
    },
    {
      "epoch": 0.052,
      "grad_norm": 4.1723175048828125,
      "learning_rate": 9.815249038111776e-05,
      "loss": 39.9997,
      "step": 520
    },
    {
      "epoch": 0.053,
      "grad_norm": 2.062711477279663,
      "learning_rate": 9.722034684781694e-05,
      "loss": 38.5916,
      "step": 530
    },
    {
      "epoch": 0.054,
      "grad_norm": 2.106576681137085,
      "learning_rate": 9.631426606617744e-05,
      "loss": 37.6645,
      "step": 540
    },
    {
      "epoch": 0.055,
      "grad_norm": 3.3942205905914307,
      "learning_rate": 9.543305571897804e-05,
      "loss": 36.591,
      "step": 550
    },
    {
      "epoch": 0.055,
      "eval_loss": 9.059900283813477,
      "eval_runtime": 6.1773,
      "eval_samples_per_second": 323.764,
      "eval_steps_per_second": 20.235,
      "step": 550
    },
    {
      "epoch": 0.056,
      "grad_norm": 5.337911128997803,
      "learning_rate": 9.457559848219179e-05,
      "loss": 35.6712,
      "step": 560
    },
    {
      "epoch": 0.057,
      "grad_norm": 5.007609844207764,
      "learning_rate": 9.374084606744877e-05,
      "loss": 34.8073,
      "step": 570
    },
    {
      "epoch": 0.058,
      "grad_norm": 3.1866860389709473,
      "learning_rate": 9.292781383291611e-05,
      "loss": 34.288,
      "step": 580
    },
    {
      "epoch": 0.059,
      "grad_norm": 3.2338063716888428,
      "learning_rate": 9.213557589959345e-05,
      "loss": 33.4251,
      "step": 590
    },
    {
      "epoch": 0.06,
      "grad_norm": 2.3791048526763916,
      "learning_rate": 9.136326071794409e-05,
      "loss": 32.8373,
      "step": 600
    },
    {
      "epoch": 0.06,
      "eval_loss": 8.064367294311523,
      "eval_runtime": 6.1993,
      "eval_samples_per_second": 322.618,
      "eval_steps_per_second": 20.164,
      "step": 600
    },
    {
      "epoch": 0.061,
      "grad_norm": 2.501295804977417,
      "learning_rate": 9.061004703659374e-05,
      "loss": 31.4265,
      "step": 610
    },
    {
      "epoch": 0.062,
      "grad_norm": 2.6768312454223633,
      "learning_rate": 8.987516023070193e-05,
      "loss": 30.8398,
      "step": 620
    },
    {
      "epoch": 0.063,
      "grad_norm": 2.1591920852661133,
      "learning_rate": 8.915786895268651e-05,
      "loss": 29.9915,
      "step": 630
    },
    {
      "epoch": 0.064,
      "grad_norm": 1.987754464149475,
      "learning_rate": 8.84574820723792e-05,
      "loss": 28.8786,
      "step": 640
    },
    {
      "epoch": 0.065,
      "grad_norm": 3.341383695602417,
      "learning_rate": 8.777334587751072e-05,
      "loss": 27.9284,
      "step": 650
    },
    {
      "epoch": 0.065,
      "eval_loss": 6.836698532104492,
      "eval_runtime": 6.2313,
      "eval_samples_per_second": 320.959,
      "eval_steps_per_second": 20.06,
      "step": 650
    },
    {
      "epoch": 0.066,
      "grad_norm": 3.247847318649292,
      "learning_rate": 8.710484150874758e-05,
      "loss": 26.8209,
      "step": 660
    },
    {
      "epoch": 0.067,
      "grad_norm": 2.0966415405273438,
      "learning_rate": 8.645138260640511e-05,
      "loss": 26.5482,
      "step": 670
    },
    {
      "epoch": 0.068,
      "grad_norm": 2.398731231689453,
      "learning_rate": 8.581241314849611e-05,
      "loss": 25.8964,
      "step": 680
    },
    {
      "epoch": 0.069,
      "grad_norm": 3.018005132675171,
      "learning_rate": 8.51874054619982e-05,
      "loss": 24.7768,
      "step": 690
    },
    {
      "epoch": 0.07,
      "grad_norm": 2.627410411834717,
      "learning_rate": 8.457585839117285e-05,
      "loss": 23.7828,
      "step": 700
    },
    {
      "epoch": 0.07,
      "eval_loss": 6.068182468414307,
      "eval_runtime": 6.1922,
      "eval_samples_per_second": 322.987,
      "eval_steps_per_second": 20.187,
      "step": 700
    },
    {
      "epoch": 0.071,
      "grad_norm": 1.910626769065857,
      "learning_rate": 8.397729560848631e-05,
      "loss": 24.4096,
      "step": 710
    },
    {
      "epoch": 0.072,
      "grad_norm": 1.7598278522491455,
      "learning_rate": 8.339126405519483e-05,
      "loss": 23.4098,
      "step": 720
    },
    {
      "epoch": 0.073,
      "grad_norm": 2.161638021469116,
      "learning_rate": 8.281733249999222e-05,
      "loss": 22.4748,
      "step": 730
    },
    {
      "epoch": 0.074,
      "grad_norm": 2.6153981685638428,
      "learning_rate": 8.225509020529976e-05,
      "loss": 22.4988,
      "step": 740
    },
    {
      "epoch": 0.075,
      "grad_norm": 2.6121160984039307,
      "learning_rate": 8.170414569182506e-05,
      "loss": 21.9317,
      "step": 750
    },
    {
      "epoch": 0.075,
      "eval_loss": 5.473199844360352,
      "eval_runtime": 6.1666,
      "eval_samples_per_second": 324.327,
      "eval_steps_per_second": 20.27,
      "step": 750
    },
    {
      "epoch": 0.076,
      "grad_norm": 2.71346378326416,
      "learning_rate": 8.116412559294567e-05,
      "loss": 21.6393,
      "step": 760
    },
    {
      "epoch": 0.077,
      "grad_norm": 1.7239511013031006,
      "learning_rate": 8.063467359130037e-05,
      "loss": 21.3508,
      "step": 770
    },
    {
      "epoch": 0.078,
      "grad_norm": 1.9020029306411743,
      "learning_rate": 8.011544943070565e-05,
      "loss": 20.6425,
      "step": 780
    },
    {
      "epoch": 0.079,
      "grad_norm": 1.9728212356567383,
      "learning_rate": 7.960612799717214e-05,
      "loss": 20.3836,
      "step": 790
    },
    {
      "epoch": 0.08,
      "grad_norm": 2.0105056762695312,
      "learning_rate": 7.910639846338163e-05,
      "loss": 20.1252,
      "step": 800
    },
    {
      "epoch": 0.08,
      "eval_loss": 5.180527210235596,
      "eval_runtime": 6.2098,
      "eval_samples_per_second": 322.073,
      "eval_steps_per_second": 20.13,
      "step": 800
    },
    {
      "epoch": 0.081,
      "grad_norm": 1.8437271118164062,
      "learning_rate": 7.861596349150974e-05,
      "loss": 20.5089,
      "step": 810
    },
    {
      "epoch": 0.082,
      "grad_norm": 1.525156855583191,
      "learning_rate": 7.813453848974926e-05,
      "loss": 20.143,
      "step": 820
    },
    {
      "epoch": 0.083,
      "grad_norm": 1.9869450330734253,
      "learning_rate": 7.766185091831061e-05,
      "loss": 20.4813,
      "step": 830
    },
    {
      "epoch": 0.084,
      "grad_norm": 1.3000235557556152,
      "learning_rate": 7.719763964105497e-05,
      "loss": 19.9456,
      "step": 840
    },
    {
      "epoch": 0.085,
      "grad_norm": 1.4937679767608643,
      "learning_rate": 7.674165431925523e-05,
      "loss": 19.5407,
      "step": 850
    },
    {
      "epoch": 0.085,
      "eval_loss": 4.915063858032227,
      "eval_runtime": 6.1969,
      "eval_samples_per_second": 322.74,
      "eval_steps_per_second": 20.171,
      "step": 850
    },
    {
      "epoch": 0.086,
      "grad_norm": 1.616244912147522,
      "learning_rate": 7.629365484428845e-05,
      "loss": 19.4324,
      "step": 860
    },
    {
      "epoch": 0.087,
      "grad_norm": 1.8139533996582031,
      "learning_rate": 7.585341080633831e-05,
      "loss": 19.2495,
      "step": 870
    },
    {
      "epoch": 0.088,
      "grad_norm": 1.4117282629013062,
      "learning_rate": 7.542070099643788e-05,
      "loss": 19.334,
      "step": 880
    },
    {
      "epoch": 0.089,
      "grad_norm": 1.3447221517562866,
      "learning_rate": 7.499531293940736e-05,
      "loss": 18.7034,
      "step": 890
    },
    {
      "epoch": 0.09,
      "grad_norm": 1.8889520168304443,
      "learning_rate": 7.457704245544709e-05,
      "loss": 18.094,
      "step": 900
    },
    {
      "epoch": 0.09,
      "eval_loss": 4.761510848999023,
      "eval_runtime": 6.2364,
      "eval_samples_per_second": 320.698,
      "eval_steps_per_second": 20.044,
      "step": 900
    },
    {
      "epoch": 0.091,
      "grad_norm": 1.7895255088806152,
      "learning_rate": 7.41656932483308e-05,
      "loss": 18.8197,
      "step": 910
    },
    {
      "epoch": 0.092,
      "grad_norm": 3.055880069732666,
      "learning_rate": 7.376107651831263e-05,
      "loss": 18.4797,
      "step": 920
    },
    {
      "epoch": 0.093,
      "grad_norm": 1.4833000898361206,
      "learning_rate": 7.336301059801394e-05,
      "loss": 18.7489,
      "step": 930
    },
    {
      "epoch": 0.094,
      "grad_norm": 1.4441015720367432,
      "learning_rate": 7.297132060969499e-05,
      "loss": 17.7985,
      "step": 940
    },
    {
      "epoch": 0.095,
      "grad_norm": 1.970638394355774,
      "learning_rate": 7.258583814244268e-05,
      "loss": 17.9167,
      "step": 950
    },
    {
      "epoch": 0.095,
      "eval_loss": 4.611672878265381,
      "eval_runtime": 6.2185,
      "eval_samples_per_second": 321.621,
      "eval_steps_per_second": 20.101,
      "step": 950
    },
    {
      "epoch": 0.096,
      "grad_norm": 1.66256582736969,
      "learning_rate": 7.220640094792103e-05,
      "loss": 18.063,
      "step": 960
    },
    {
      "epoch": 0.097,
      "grad_norm": 1.8178454637527466,
      "learning_rate": 7.183285265343593e-05,
      "loss": 18.5062,
      "step": 970
    },
    {
      "epoch": 0.098,
      "grad_norm": 1.5309795141220093,
      "learning_rate": 7.14650424911616e-05,
      "loss": 18.2083,
      "step": 980
    },
    {
      "epoch": 0.099,
      "grad_norm": 1.72297203540802,
      "learning_rate": 7.110282504246376e-05,
      "loss": 17.6364,
      "step": 990
    },
    {
      "epoch": 0.1,
      "grad_norm": 1.8936313390731812,
      "learning_rate": 7.074605999633481e-05,
      "loss": 17.6297,
      "step": 1000
    },
    {
      "epoch": 0.1,
      "eval_loss": 4.519549369812012,
      "eval_runtime": 6.233,
      "eval_samples_per_second": 320.874,
      "eval_steps_per_second": 20.055,
      "step": 1000
    },
    {
      "epoch": 0.101,
      "grad_norm": 1.5446372032165527,
      "learning_rate": 7.03946119210298e-05,
      "loss": 17.7636,
      "step": 1010
    },
    {
      "epoch": 0.102,
      "grad_norm": 1.8526910543441772,
      "learning_rate": 7.004835004805859e-05,
      "loss": 17.4462,
      "step": 1020
    },
    {
      "epoch": 0.103,
      "grad_norm": 1.521323323249817,
      "learning_rate": 6.970714806775237e-05,
      "loss": 18.2182,
      "step": 1030
    },
    {
      "epoch": 0.104,
      "grad_norm": 1.8971052169799805,
      "learning_rate": 6.937088393567812e-05,
      "loss": 17.8118,
      "step": 1040
    },
    {
      "epoch": 0.105,
      "grad_norm": 2.105436325073242,
      "learning_rate": 6.903943968922749e-05,
      "loss": 17.0171,
      "step": 1050
    },
    {
      "epoch": 0.105,
      "eval_loss": 4.442924499511719,
      "eval_runtime": 6.2358,
      "eval_samples_per_second": 320.727,
      "eval_steps_per_second": 20.045,
      "step": 1050
    },
    {
      "epoch": 0.106,
      "grad_norm": 1.4423048496246338,
      "learning_rate": 6.871270127375409e-05,
      "loss": 17.8339,
      "step": 1060
    },
    {
      "epoch": 0.107,
      "grad_norm": 1.2477686405181885,
      "learning_rate": 6.839055837767724e-05,
      "loss": 17.8775,
      "step": 1070
    },
    {
      "epoch": 0.108,
      "grad_norm": 1.5547492504119873,
      "learning_rate": 6.807290427601058e-05,
      "loss": 16.7857,
      "step": 1080
    },
    {
      "epoch": 0.109,
      "grad_norm": 2.1341335773468018,
      "learning_rate": 6.775963568181182e-05,
      "loss": 17.0301,
      "step": 1090
    },
    {
      "epoch": 0.11,
      "grad_norm": 2.172020673751831,
      "learning_rate": 6.74506526050837e-05,
      "loss": 17.62,
      "step": 1100
    },
    {
      "epoch": 0.11,
      "eval_loss": 4.378727912902832,
      "eval_runtime": 6.2451,
      "eval_samples_per_second": 320.249,
      "eval_steps_per_second": 20.016,
      "step": 1100
    },
    {
      "epoch": 0.111,
      "grad_norm": 1.4341588020324707,
      "learning_rate": 6.714585821868878e-05,
      "loss": 17.2087,
      "step": 1110
    },
    {
      "epoch": 0.112,
      "grad_norm": 1.8107764720916748,
      "learning_rate": 6.68451587308695e-05,
      "loss": 17.0762,
      "step": 1120
    },
    {
      "epoch": 0.113,
      "grad_norm": 1.6209523677825928,
      "learning_rate": 6.654846326399234e-05,
      "loss": 17.8408,
      "step": 1130
    },
    {
      "epoch": 0.114,
      "grad_norm": 1.4772510528564453,
      "learning_rate": 6.625568373916034e-05,
      "loss": 17.497,
      "step": 1140
    },
    {
      "epoch": 0.115,
      "grad_norm": 1.7777224779129028,
      "learning_rate": 6.596673476636102e-05,
      "loss": 17.0651,
      "step": 1150
    },
    {
      "epoch": 0.115,
      "eval_loss": 4.317691326141357,
      "eval_runtime": 6.1999,
      "eval_samples_per_second": 322.587,
      "eval_steps_per_second": 20.162,
      "step": 1150
    },
    {
      "epoch": 0.116,
      "grad_norm": 1.7212860584259033,
      "learning_rate": 6.568153353983866e-05,
      "loss": 17.0681,
      "step": 1160
    },
    {
      "epoch": 0.117,
      "grad_norm": 1.2618902921676636,
      "learning_rate": 6.53999997384e-05,
      "loss": 16.4462,
      "step": 1170
    },
    {
      "epoch": 0.118,
      "grad_norm": 1.5486454963684082,
      "learning_rate": 6.512205543038029e-05,
      "loss": 17.3137,
      "step": 1180
    },
    {
      "epoch": 0.119,
      "grad_norm": 1.424975037574768,
      "learning_rate": 6.48476249830151e-05,
      "loss": 17.0415,
      "step": 1190
    },
    {
      "epoch": 0.12,
      "grad_norm": 1.4205785989761353,
      "learning_rate": 6.457663497597783e-05,
      "loss": 16.9146,
      "step": 1200
    },
    {
      "epoch": 0.12,
      "eval_loss": 4.270516395568848,
      "eval_runtime": 6.2291,
      "eval_samples_per_second": 321.073,
      "eval_steps_per_second": 20.067,
      "step": 1200
    },
    {
      "epoch": 0.121,
      "grad_norm": 1.6844050884246826,
      "learning_rate": 6.430901411885911e-05,
      "loss": 17.1692,
      "step": 1210
    },
    {
      "epoch": 0.122,
      "grad_norm": 1.607489824295044,
      "learning_rate": 6.40446931723768e-05,
      "loss": 16.319,
      "step": 1220
    },
    {
      "epoch": 0.123,
      "grad_norm": 1.5149353742599487,
      "learning_rate": 6.378360487311965e-05,
      "loss": 16.5953,
      "step": 1230
    },
    {
      "epoch": 0.124,
      "grad_norm": 1.5084587335586548,
      "learning_rate": 6.352568386163805e-05,
      "loss": 17.0408,
      "step": 1240
    },
    {
      "epoch": 0.125,
      "grad_norm": 1.342761516571045,
      "learning_rate": 6.327086661370808e-05,
      "loss": 16.8208,
      "step": 1250
    },
    {
      "epoch": 0.125,
      "eval_loss": 4.237093448638916,
      "eval_runtime": 6.2362,
      "eval_samples_per_second": 320.71,
      "eval_steps_per_second": 20.044,
      "step": 1250
    },
    {
      "epoch": 0.126,
      "grad_norm": 1.3136533498764038,
      "learning_rate": 6.301909137460409e-05,
      "loss": 16.7708,
      "step": 1260
    },
    {
      "epoch": 0.127,
      "grad_norm": 1.4737874269485474,
      "learning_rate": 6.277029809622579e-05,
      "loss": 17.0393,
      "step": 1270
    },
    {
      "epoch": 0.128,
      "grad_norm": 1.409523367881775,
      "learning_rate": 6.252442837693433e-05,
      "loss": 16.7568,
      "step": 1280
    },
    {
      "epoch": 0.129,
      "grad_norm": 1.3414747714996338,
      "learning_rate": 6.22814254039606e-05,
      "loss": 16.3892,
      "step": 1290
    },
    {
      "epoch": 0.13,
      "grad_norm": 1.1919307708740234,
      "learning_rate": 6.204123389825647e-05,
      "loss": 16.6787,
      "step": 1300
    },
    {
      "epoch": 0.13,
      "eval_loss": 4.198209762573242,
      "eval_runtime": 6.2223,
      "eval_samples_per_second": 321.426,
      "eval_steps_per_second": 20.089,
      "step": 1300
    },
    {
      "epoch": 0.131,
      "grad_norm": 1.3193895816802979,
      "learning_rate": 6.180380006166808e-05,
      "loss": 16.7276,
      "step": 1310
    },
    {
      "epoch": 0.132,
      "grad_norm": 1.4551233053207397,
      "learning_rate": 6.156907152631576e-05,
      "loss": 16.5019,
      "step": 1320
    },
    {
      "epoch": 0.133,
      "grad_norm": 1.6626161336898804,
      "learning_rate": 6.133699730607301e-05,
      "loss": 16.7516,
      "step": 1330
    },
    {
      "epoch": 0.134,
      "grad_norm": 1.442935824394226,
      "learning_rate": 6.110752775004223e-05,
      "loss": 16.5704,
      "step": 1340
    },
    {
      "epoch": 0.135,
      "grad_norm": 1.667401909828186,
      "learning_rate": 6.088061449793082e-05,
      "loss": 16.4191,
      "step": 1350
    },
    {
      "epoch": 0.135,
      "eval_loss": 4.162066459655762,
      "eval_runtime": 6.2174,
      "eval_samples_per_second": 321.679,
      "eval_steps_per_second": 20.105,
      "step": 1350
    },
    {
      "epoch": 0.136,
      "grad_norm": 1.3251672983169556,
      "learning_rate": 6.065621043723658e-05,
      "loss": 16.1052,
      "step": 1360
    },
    {
      "epoch": 0.137,
      "grad_norm": 1.4112827777862549,
      "learning_rate": 6.043426966215649e-05,
      "loss": 16.4661,
      "step": 1370
    },
    {
      "epoch": 0.138,
      "grad_norm": 1.4340181350708008,
      "learning_rate": 6.021474743413714e-05,
      "loss": 16.2721,
      "step": 1380
    },
    {
      "epoch": 0.139,
      "grad_norm": 1.4311639070510864,
      "learning_rate": 5.999760014399041e-05,
      "loss": 16.1731,
      "step": 1390
    },
    {
      "epoch": 0.14,
      "grad_norm": 1.649147868156433,
      "learning_rate": 5.978278527550084e-05,
      "loss": 16.3638,
      "step": 1400
    },
    {
      "epoch": 0.14,
      "eval_loss": 4.139286518096924,
      "eval_runtime": 6.2422,
      "eval_samples_per_second": 320.402,
      "eval_steps_per_second": 20.025,
      "step": 1400
    },
    {
      "epoch": 0.141,
      "grad_norm": 1.2151726484298706,
      "learning_rate": 5.957026137045648e-05,
      "loss": 16.2606,
      "step": 1410
    },
    {
      "epoch": 0.142,
      "grad_norm": 1.3587441444396973,
      "learning_rate": 5.935998799503725e-05,
      "loss": 16.083,
      "step": 1420
    },
    {
      "epoch": 0.143,
      "grad_norm": 1.181125283241272,
      "learning_rate": 5.91519257074994e-05,
      "loss": 16.117,
      "step": 1430
    },
    {
      "epoch": 0.144,
      "grad_norm": 1.076505184173584,
      "learning_rate": 5.8946036027097295e-05,
      "loss": 15.7447,
      "step": 1440
    },
    {
      "epoch": 0.145,
      "grad_norm": 1.402579665184021,
      "learning_rate": 5.8742281404186785e-05,
      "loss": 16.4264,
      "step": 1450
    },
    {
      "epoch": 0.145,
      "eval_loss": 4.133431434631348,
      "eval_runtime": 6.1793,
      "eval_samples_per_second": 323.661,
      "eval_steps_per_second": 20.229,
      "step": 1450
    },
    {
      "epoch": 0.146,
      "grad_norm": 1.0925723314285278,
      "learning_rate": 5.8540625191457576e-05,
      "loss": 16.6002,
      "step": 1460
    },
    {
      "epoch": 0.147,
      "grad_norm": 1.100009799003601,
      "learning_rate": 5.834103161624459e-05,
      "loss": 16.3612,
      "step": 1470
    },
    {
      "epoch": 0.148,
      "grad_norm": 1.91799795627594,
      "learning_rate": 5.8143465753870694e-05,
      "loss": 16.4593,
      "step": 1480
    },
    {
      "epoch": 0.149,
      "grad_norm": 1.2950007915496826,
      "learning_rate": 5.7947893501975715e-05,
      "loss": 15.9469,
      "step": 1490
    },
    {
      "epoch": 0.15,
      "grad_norm": 1.4627280235290527,
      "learning_rate": 5.7754281555789e-05,
      "loss": 16.009,
      "step": 1500
    },
    {
      "epoch": 0.15,
      "eval_loss": 4.096825122833252,
      "eval_runtime": 6.2109,
      "eval_samples_per_second": 322.016,
      "eval_steps_per_second": 20.126,
      "step": 1500
    },
    {
      "epoch": 0.151,
      "grad_norm": 1.588494062423706,
      "learning_rate": 5.756259738430475e-05,
      "loss": 15.8375,
      "step": 1510
    },
    {
      "epoch": 0.152,
      "grad_norm": 1.4053016901016235,
      "learning_rate": 5.7372809207321355e-05,
      "loss": 16.3388,
      "step": 1520
    },
    {
      "epoch": 0.153,
      "grad_norm": 1.3402225971221924,
      "learning_rate": 5.71848859733081e-05,
      "loss": 16.3163,
      "step": 1530
    },
    {
      "epoch": 0.154,
      "grad_norm": 1.195488691329956,
      "learning_rate": 5.699879733806412e-05,
      "loss": 16.056,
      "step": 1540
    },
    {
      "epoch": 0.155,
      "grad_norm": 1.3463131189346313,
      "learning_rate": 5.681451364413635e-05,
      "loss": 16.0528,
      "step": 1550
    },
    {
      "epoch": 0.155,
      "eval_loss": 4.0687785148620605,
      "eval_runtime": 6.2217,
      "eval_samples_per_second": 321.456,
      "eval_steps_per_second": 20.091,
      "step": 1550
    },
    {
      "epoch": 0.156,
      "grad_norm": 1.0611999034881592,
      "learning_rate": 5.663200590096471e-05,
      "loss": 15.7964,
      "step": 1560
    },
    {
      "epoch": 0.157,
      "grad_norm": 1.2196369171142578,
      "learning_rate": 5.645124576572452e-05,
      "loss": 16.2541,
      "step": 1570
    },
    {
      "epoch": 0.158,
      "grad_norm": 1.5234003067016602,
      "learning_rate": 5.627220552483715e-05,
      "loss": 15.9411,
      "step": 1580
    },
    {
      "epoch": 0.159,
      "grad_norm": 1.2116581201553345,
      "learning_rate": 5.609485807612173e-05,
      "loss": 16.2016,
      "step": 1590
    },
    {
      "epoch": 0.16,
      "grad_norm": 1.3541016578674316,
      "learning_rate": 5.591917691156175e-05,
      "loss": 16.0676,
      "step": 1600
    },
    {
      "epoch": 0.16,
      "eval_loss": 4.064485549926758,
      "eval_runtime": 6.2348,
      "eval_samples_per_second": 320.778,
      "eval_steps_per_second": 20.049,
      "step": 1600
    },
    {
      "epoch": 0.161,
      "grad_norm": 1.320177674293518,
      "learning_rate": 5.5745136100661674e-05,
      "loss": 15.6234,
      "step": 1610
    },
    {
      "epoch": 0.162,
      "grad_norm": 1.6600571870803833,
      "learning_rate": 5.557271027436971e-05,
      "loss": 15.4577,
      "step": 1620
    },
    {
      "epoch": 0.163,
      "grad_norm": 1.194664478302002,
      "learning_rate": 5.540187460954447e-05,
      "loss": 15.8888,
      "step": 1630
    },
    {
      "epoch": 0.164,
      "grad_norm": 1.5018142461776733,
      "learning_rate": 5.523260481394348e-05,
      "loss": 15.5123,
      "step": 1640
    },
    {
      "epoch": 0.165,
      "grad_norm": 1.5969749689102173,
      "learning_rate": 5.506487711171322e-05,
      "loss": 15.4382,
      "step": 1650
    },
    {
      "epoch": 0.165,
      "eval_loss": 4.040594577789307,
      "eval_runtime": 6.2211,
      "eval_samples_per_second": 321.486,
      "eval_steps_per_second": 20.093,
      "step": 1650
    },
    {
      "epoch": 0.166,
      "grad_norm": 1.6020385026931763,
      "learning_rate": 5.489866822936095e-05,
      "loss": 16.4235,
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 0.167, | |
| "grad_norm": 1.2778631448745728, | |
| "learning_rate": 5.4733955382189484e-05, | |
| "loss": 15.8776, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 0.168, | |
| "grad_norm": 1.5798327922821045, | |
| "learning_rate": 5.457071626117703e-05, | |
| "loss": 15.4616, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 0.169, | |
| "grad_norm": 1.3043429851531982, | |
| "learning_rate": 5.440892902028488e-05, | |
| "loss": 16.2131, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 0.17, | |
| "grad_norm": 1.4358928203582764, | |
| "learning_rate": 5.424857226417659e-05, | |
| "loss": 15.7224, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 0.17, | |
| "eval_loss": 4.0164995193481445, | |
| "eval_runtime": 6.1969, | |
| "eval_samples_per_second": 322.742, | |
| "eval_steps_per_second": 20.171, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 0.171, | |
| "grad_norm": 1.1965007781982422, | |
| "learning_rate": 5.408962503633292e-05, | |
| "loss": 15.134, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 0.172, | |
| "grad_norm": 1.2486337423324585, | |
| "learning_rate": 5.39320668075478e-05, | |
| "loss": 16.0313, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 0.173, | |
| "grad_norm": 1.285971999168396, | |
| "learning_rate": 5.3775877464790436e-05, | |
| "loss": 15.5165, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 0.174, | |
| "grad_norm": 1.28692626953125, | |
| "learning_rate": 5.362103730042052e-05, | |
| "loss": 15.7254, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 0.175, | |
| "grad_norm": 1.1224619150161743, | |
| "learning_rate": 5.346752700174288e-05, | |
| "loss": 15.3909, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 0.175, | |
| "eval_loss": 4.034187316894531, | |
| "eval_runtime": 6.2137, | |
| "eval_samples_per_second": 321.872, | |
| "eval_steps_per_second": 20.117, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 0.176, | |
| "grad_norm": 1.2035942077636719, | |
| "learning_rate": 5.331532764088928e-05, | |
| "loss": 15.7032, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 0.177, | |
| "grad_norm": 1.5655755996704102, | |
| "learning_rate": 5.316442066501519e-05, | |
| "loss": 15.5386, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 0.178, | |
| "grad_norm": 1.363478660583496, | |
| "learning_rate": 5.30147878868001e-05, | |
| "loss": 15.736, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 0.179, | |
| "grad_norm": 1.1897183656692505, | |
| "learning_rate": 5.2866411475240354e-05, | |
| "loss": 15.4589, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 0.18, | |
| "grad_norm": 1.4313431978225708, | |
| "learning_rate": 5.2719273946723746e-05, | |
| "loss": 15.9495, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 0.18, | |
| "eval_loss": 3.990009307861328, | |
| "eval_runtime": 6.2278, | |
| "eval_samples_per_second": 321.142, | |
| "eval_steps_per_second": 20.071, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 0.181, | |
| "grad_norm": 1.5434056520462036, | |
| "learning_rate": 5.257335815637598e-05, | |
| "loss": 15.8692, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 0.182, | |
| "grad_norm": 1.4785411357879639, | |
| "learning_rate": 5.242864728966902e-05, | |
| "loss": 16.1013, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 0.183, | |
| "grad_norm": 1.264227032661438, | |
| "learning_rate": 5.2285124854282266e-05, | |
| "loss": 15.9116, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 0.184, | |
| "grad_norm": 1.4657548666000366, | |
| "learning_rate": 5.2142774672207326e-05, | |
| "loss": 15.8593, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 0.185, | |
| "grad_norm": 1.4138890504837036, | |
| "learning_rate": 5.200158087208814e-05, | |
| "loss": 15.9445, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 0.185, | |
| "eval_loss": 3.9676108360290527, | |
| "eval_runtime": 6.205, | |
| "eval_samples_per_second": 322.322, | |
| "eval_steps_per_second": 20.145, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 0.186, | |
| "grad_norm": 1.3209441900253296, | |
| "learning_rate": 5.186152788178785e-05, | |
| "loss": 15.2406, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 0.187, | |
| "grad_norm": 1.2953730821609497, | |
| "learning_rate": 5.172260042117486e-05, | |
| "loss": 15.5566, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 0.188, | |
| "grad_norm": 1.1381759643554688, | |
| "learning_rate": 5.1584783495120195e-05, | |
| "loss": 16.0945, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 0.189, | |
| "grad_norm": 1.1710163354873657, | |
| "learning_rate": 5.1448062386699125e-05, | |
| "loss": 15.9861, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 0.19, | |
| "grad_norm": 1.3093607425689697, | |
| "learning_rate": 5.1312422650589934e-05, | |
| "loss": 15.7921, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 0.19, | |
| "eval_loss": 3.962754487991333, | |
| "eval_runtime": 6.2302, | |
| "eval_samples_per_second": 321.018, | |
| "eval_steps_per_second": 20.064, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 0.191, | |
| "grad_norm": 1.4274228811264038, | |
| "learning_rate": 5.117785010666307e-05, | |
| "loss": 15.6979, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 0.192, | |
| "grad_norm": 1.5318870544433594, | |
| "learning_rate": 5.104433083375434e-05, | |
| "loss": 15.4107, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 0.193, | |
| "grad_norm": 1.5562711954116821, | |
| "learning_rate": 5.091185116361582e-05, | |
| "loss": 15.5957, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 0.194, | |
| "grad_norm": 1.5389816761016846, | |
| "learning_rate": 5.0780397675038636e-05, | |
| "loss": 15.5794, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 0.195, | |
| "grad_norm": 1.1989519596099854, | |
| "learning_rate": 5.0649957188141786e-05, | |
| "loss": 15.3292, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 0.195, | |
| "eval_loss": 3.9427313804626465, | |
| "eval_runtime": 6.2462, | |
| "eval_samples_per_second": 320.195, | |
| "eval_steps_per_second": 20.012, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 0.196, | |
| "grad_norm": 1.64460027217865, | |
| "learning_rate": 5.052051675882142e-05, | |
| "loss": 15.4911, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 0.197, | |
| "grad_norm": 1.2332730293273926, | |
| "learning_rate": 5.0392063673355584e-05, | |
| "loss": 15.5501, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 0.198, | |
| "grad_norm": 1.463189959526062, | |
| "learning_rate": 5.026458544315881e-05, | |
| "loss": 15.6168, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 0.199, | |
| "grad_norm": 1.3763878345489502, | |
| "learning_rate": 5.013806979968224e-05, | |
| "loss": 15.4993, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 0.2, | |
| "grad_norm": 1.0232411623001099, | |
| "learning_rate": 5.0012504689453974e-05, | |
| "loss": 15.5746, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.2, | |
| "eval_loss": 3.9369847774505615, | |
| "eval_runtime": 6.2415, | |
| "eval_samples_per_second": 320.436, | |
| "eval_steps_per_second": 20.027, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.201, | |
| "grad_norm": 1.4828171730041504, | |
| "learning_rate": 4.988787826925559e-05, | |
| "loss": 15.9385, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 0.202, | |
| "grad_norm": 1.041183590888977, | |
| "learning_rate": 4.9764178901430006e-05, | |
| "loss": 15.8048, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 0.203, | |
| "grad_norm": 1.3011362552642822, | |
| "learning_rate": 4.9641395149316935e-05, | |
| "loss": 15.6496, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 0.204, | |
| "grad_norm": 1.38265860080719, | |
| "learning_rate": 4.951951577281135e-05, | |
| "loss": 15.4042, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 0.205, | |
| "grad_norm": 1.2859934568405151, | |
| "learning_rate": 4.939852972404162e-05, | |
| "loss": 15.8823, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 0.205, | |
| "eval_loss": 3.9209940433502197, | |
| "eval_runtime": 7.0368, | |
| "eval_samples_per_second": 284.218, | |
| "eval_steps_per_second": 17.764, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 0.206, | |
| "grad_norm": 1.7220138311386108, | |
| "learning_rate": 4.927842614316289e-05, | |
| "loss": 15.5192, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 0.207, | |
| "grad_norm": 1.396269679069519, | |
| "learning_rate": 4.9159194354262706e-05, | |
| "loss": 15.5069, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 0.208, | |
| "grad_norm": 1.146694540977478, | |
| "learning_rate": 4.904082386137498e-05, | |
| "loss": 15.3511, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 0.209, | |
| "grad_norm": 1.231331467628479, | |
| "learning_rate": 4.892330434459896e-05, | |
| "loss": 15.0852, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 0.21, | |
| "grad_norm": 1.0101878643035889, | |
| "learning_rate": 4.880662565632016e-05, | |
| "loss": 15.1614, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 0.21, | |
| "eval_loss": 3.9238545894622803, | |
| "eval_runtime": 6.2013, | |
| "eval_samples_per_second": 322.513, | |
| "eval_steps_per_second": 20.157, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 0.211, | |
| "grad_norm": 1.128787875175476, | |
| "learning_rate": 4.869077781752983e-05, | |
| "loss": 14.8515, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 0.212, | |
| "grad_norm": 1.4294734001159668, | |
| "learning_rate": 4.857575101424013e-05, | |
| "loss": 14.8838, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 0.213, | |
| "grad_norm": 1.6185399293899536, | |
| "learning_rate": 4.846153559399206e-05, | |
| "loss": 15.0193, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 0.214, | |
| "grad_norm": 1.1443840265274048, | |
| "learning_rate": 4.834812206245318e-05, | |
| "loss": 15.2785, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 0.215, | |
| "grad_norm": 1.3191642761230469, | |
| "learning_rate": 4.8235501080102624e-05, | |
| "loss": 15.1923, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 0.215, | |
| "eval_loss": 3.9039673805236816, | |
| "eval_runtime": 6.2499, | |
| "eval_samples_per_second": 320.003, | |
| "eval_steps_per_second": 20.0, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 0.216, | |
| "grad_norm": 1.6260229349136353, | |
| "learning_rate": 4.812366345900056e-05, | |
| "loss": 15.2295, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 0.217, | |
| "grad_norm": 1.3886921405792236, | |
| "learning_rate": 4.801260015963979e-05, | |
| "loss": 15.149, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 0.218, | |
| "grad_norm": 1.159067988395691, | |
| "learning_rate": 4.790230228787671e-05, | |
| "loss": 14.3841, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 0.219, | |
| "grad_norm": 1.0690653324127197, | |
| "learning_rate": 4.779276109193975e-05, | |
| "loss": 14.8772, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 0.22, | |
| "grad_norm": 1.275758147239685, | |
| "learning_rate": 4.7683967959512366e-05, | |
| "loss": 15.0286, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 0.22, | |
| "eval_loss": 3.9052722454071045, | |
| "eval_runtime": 6.1987, | |
| "eval_samples_per_second": 322.65, | |
| "eval_steps_per_second": 20.166, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 0.221, | |
| "grad_norm": 1.3399527072906494, | |
| "learning_rate": 4.757591441488914e-05, | |
| "loss": 15.691, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 0.222, | |
| "grad_norm": 1.5562835931777954, | |
| "learning_rate": 4.746859211620213e-05, | |
| "loss": 15.8526, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 0.223, | |
| "grad_norm": 1.3359469175338745, | |
| "learning_rate": 4.7361992852715864e-05, | |
| "loss": 15.4991, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 0.224, | |
| "grad_norm": 1.1983004808425903, | |
| "learning_rate": 4.7256108542188905e-05, | |
| "loss": 15.2238, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 0.225, | |
| "grad_norm": 1.349929690361023, | |
| "learning_rate": 4.715093122829988e-05, | |
| "loss": 15.3992, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 0.225, | |
| "eval_loss": 3.880711317062378, | |
| "eval_runtime": 6.2762, | |
| "eval_samples_per_second": 318.664, | |
| "eval_steps_per_second": 19.916, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 0.226, | |
| "grad_norm": 1.2029528617858887, | |
| "learning_rate": 4.704645307813639e-05, | |
| "loss": 15.5645, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 0.227, | |
| "grad_norm": 1.2777271270751953, | |
| "learning_rate": 4.6942666379744746e-05, | |
| "loss": 15.4002, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 0.228, | |
| "grad_norm": 1.5176602602005005, | |
| "learning_rate": 4.683956353973895e-05, | |
| "loss": 15.6002, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 0.229, | |
| "grad_norm": 1.6124011278152466, | |
| "learning_rate": 4.6737137080967207e-05, | |
| "loss": 15.335, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 0.23, | |
| "grad_norm": 1.8375682830810547, | |
| "learning_rate": 4.663537964023428e-05, | |
| "loss": 15.6535, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 0.23, | |
| "eval_loss": 3.8743247985839844, | |
| "eval_runtime": 6.2459, | |
| "eval_samples_per_second": 320.209, | |
| "eval_steps_per_second": 20.013, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 0.231, | |
| "grad_norm": 1.488021969795227, | |
| "learning_rate": 4.653428396607817e-05, | |
| "loss": 15.6247, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 0.232, | |
| "grad_norm": 1.5195178985595703, | |
| "learning_rate": 4.643384291659964e-05, | |
| "loss": 15.2655, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 0.233, | |
| "grad_norm": 1.240750789642334, | |
| "learning_rate": 4.6334049457342925e-05, | |
| "loss": 14.4992, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 0.234, | |
| "grad_norm": 1.3504952192306519, | |
| "learning_rate": 4.623489665922651e-05, | |
| "loss": 15.07, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 0.235, | |
| "grad_norm": 1.385376214981079, | |
| "learning_rate": 4.613637769652221e-05, | |
| "loss": 15.1405, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 0.235, | |
| "eval_loss": 3.872030735015869, | |
| "eval_runtime": 6.2626, | |
| "eval_samples_per_second": 319.357, | |
| "eval_steps_per_second": 19.96, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 0.236, | |
| "grad_norm": 1.143419861793518, | |
| "learning_rate": 4.603848584488156e-05, | |
| "loss": 14.7073, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 0.237, | |
| "grad_norm": 1.4339746236801147, | |
| "learning_rate": 4.594121447940805e-05, | |
| "loss": 14.9982, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 0.238, | |
| "grad_norm": 1.5718977451324463, | |
| "learning_rate": 4.58445570727739e-05, | |
| "loss": 14.811, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 0.239, | |
| "grad_norm": 1.4219948053359985, | |
| "learning_rate": 4.574850719338033e-05, | |
| "loss": 15.0809, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "grad_norm": 1.4401710033416748, | |
| "learning_rate": 4.5653058503559986e-05, | |
| "loss": 15.3598, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "eval_loss": 3.85036563873291, | |
| "eval_runtime": 6.2378, | |
| "eval_samples_per_second": 320.628, | |
| "eval_steps_per_second": 20.039, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 0.241, | |
| "grad_norm": 1.5921422243118286, | |
| "learning_rate": 4.555820475782052e-05, | |
| "loss": 15.2551, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 0.242, | |
| "grad_norm": 1.5687378644943237, | |
| "learning_rate": 4.5463939801128054e-05, | |
| "loss": 15.1761, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 0.243, | |
| "grad_norm": 1.3394169807434082, | |
| "learning_rate": 4.537025756722961e-05, | |
| "loss": 15.4282, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 0.244, | |
| "grad_norm": 1.3350077867507935, | |
| "learning_rate": 4.527715207701344e-05, | |
| "loss": 15.8026, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 0.245, | |
| "grad_norm": 1.2768425941467285, | |
| "learning_rate": 4.51846174369061e-05, | |
| "loss": 15.2332, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 0.245, | |
| "eval_loss": 3.8509819507598877, | |
| "eval_runtime": 6.2712, | |
| "eval_samples_per_second": 318.92, | |
| "eval_steps_per_second": 19.933, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 0.246, | |
| "grad_norm": 1.13394033908844, | |
| "learning_rate": 4.5092647837305585e-05, | |
| "loss": 14.7858, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 0.247, | |
| "grad_norm": 1.117067575454712, | |
| "learning_rate": 4.500123755104922e-05, | |
| "loss": 15.0364, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 0.248, | |
| "grad_norm": 1.2777060270309448, | |
| "learning_rate": 4.4910380931915706e-05, | |
| "loss": 15.4375, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 0.249, | |
| "grad_norm": 1.1972999572753906, | |
| "learning_rate": 4.4820072413160295e-05, | |
| "loss": 15.2635, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 0.25, | |
| "grad_norm": 1.1700772047042847, | |
| "learning_rate": 4.4730306506082105e-05, | |
| "loss": 15.3477, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.25, | |
| "eval_loss": 3.8322501182556152, | |
| "eval_runtime": 6.2555, | |
| "eval_samples_per_second": 319.718, | |
| "eval_steps_per_second": 19.982, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.251, | |
| "grad_norm": 1.5145503282546997, | |
| "learning_rate": 4.4641077798623075e-05, | |
| "loss": 14.9236, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 0.252, | |
| "grad_norm": 1.3210171461105347, | |
| "learning_rate": 4.455238095399737e-05, | |
| "loss": 14.8855, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 0.253, | |
| "grad_norm": 1.3452820777893066, | |
| "learning_rate": 4.446421070935069e-05, | |
| "loss": 15.3636, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 0.254, | |
| "grad_norm": 1.3298344612121582, | |
| "learning_rate": 4.437656187444866e-05, | |
| "loss": 15.6724, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 0.255, | |
| "grad_norm": 1.1179172992706299, | |
| "learning_rate": 4.428942933039357e-05, | |
| "loss": 15.4457, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 0.255, | |
| "eval_loss": 3.829561471939087, | |
| "eval_runtime": 8.4255, | |
| "eval_samples_per_second": 237.373, | |
| "eval_steps_per_second": 14.836, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 0.256, | |
| "grad_norm": 1.2972711324691772, | |
| "learning_rate": 4.420280802836862e-05, | |
| "loss": 14.9027, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 0.257, | |
| "grad_norm": 1.156786322593689, | |
| "learning_rate": 4.4116692988409206e-05, | |
| "loss": 14.8428, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 0.258, | |
| "grad_norm": 1.2626519203186035, | |
| "learning_rate": 4.4031079298200316e-05, | |
| "loss": 15.1462, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 0.259, | |
| "grad_norm": 1.174372673034668, | |
| "learning_rate": 4.394596211189963e-05, | |
| "loss": 15.5364, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 0.26, | |
| "grad_norm": 1.359827995300293, | |
| "learning_rate": 4.3861336648985394e-05, | |
| "loss": 15.2199, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 0.26, | |
| "eval_loss": 3.823606014251709, | |
| "eval_runtime": 6.2399, | |
| "eval_samples_per_second": 320.517, | |
| "eval_steps_per_second": 20.032, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 0.261, | |
| "grad_norm": 1.157862901687622, | |
| "learning_rate": 4.3777198193128756e-05, | |
| "loss": 14.998, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 0.262, | |
| "grad_norm": 1.2194920778274536, | |
| "learning_rate": 4.369354209108969e-05, | |
| "loss": 14.8853, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 0.263, | |
| "grad_norm": 1.1531281471252441, | |
| "learning_rate": 4.3610363751636137e-05, | |
| "loss": 14.8103, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 0.264, | |
| "grad_norm": 0.967728316783905, | |
| "learning_rate": 4.352765864448559e-05, | |
| "loss": 14.9321, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 0.265, | |
| "grad_norm": 1.277216911315918, | |
| "learning_rate": 4.344542229926874e-05, | |
| "loss": 14.5667, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 0.265, | |
| "eval_loss": 3.816464900970459, | |
| "eval_runtime": 6.1924, | |
| "eval_samples_per_second": 322.979, | |
| "eval_steps_per_second": 20.186, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 0.266, | |
| "grad_norm": 1.4289199113845825, | |
| "learning_rate": 4.336365030451462e-05, | |
| "loss": 15.0527, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 0.267, | |
| "grad_norm": 1.1268882751464844, | |
| "learning_rate": 4.3282338306656564e-05, | |
| "loss": 14.8036, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 0.268, | |
| "grad_norm": 1.1748231649398804, | |
| "learning_rate": 4.32014820090587e-05, | |
| "loss": 14.4974, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 0.269, | |
| "grad_norm": 1.355406403541565, | |
| "learning_rate": 4.312107717106231e-05, | |
| "loss": 14.7474, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 0.27, | |
| "grad_norm": 1.3552896976470947, | |
| "learning_rate": 4.30411196070517e-05, | |
| "loss": 15.0418, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 0.27, | |
| "eval_loss": 3.800353527069092, | |
| "eval_runtime": 6.2378, | |
| "eval_samples_per_second": 320.624, | |
| "eval_steps_per_second": 20.039, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 0.271, | |
| "grad_norm": 1.2844239473342896, | |
| "learning_rate": 4.296160518553892e-05, | |
| "loss": 15.5347, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 0.272, | |
| "grad_norm": 1.2744359970092773, | |
| "learning_rate": 4.288252982826728e-05, | |
| "loss": 15.3665, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 0.273, | |
| "grad_norm": 1.216260313987732, | |
| "learning_rate": 4.2803889509332595e-05, | |
| "loss": 14.944, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 0.274, | |
| "grad_norm": 1.2304832935333252, | |
| "learning_rate": 4.2725680254322424e-05, | |
| "loss": 14.9361, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 0.275, | |
| "grad_norm": 1.1939611434936523, | |
| "learning_rate": 4.2647898139472306e-05, | |
| "loss": 15.0726, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 0.275, | |
| "eval_loss": 3.7999727725982666, | |
| "eval_runtime": 6.2305, | |
| "eval_samples_per_second": 321.001, | |
| "eval_steps_per_second": 20.063, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 0.276, | |
| "grad_norm": 1.5539189577102661, | |
| "learning_rate": 4.2570539290839e-05, | |
| "loss": 14.7788, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 0.277, | |
| "grad_norm": 1.3662588596343994, | |
| "learning_rate": 4.2493599883490024e-05, | |
| "loss": 15.1841, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 0.278, | |
| "grad_norm": 1.1410843133926392, | |
| "learning_rate": 4.241707614070937e-05, | |
| "loss": 14.9416, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 0.279, | |
| "grad_norm": 1.1068998575210571, | |
| "learning_rate": 4.234096433321879e-05, | |
| "loss": 14.4817, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 0.28, | |
| "grad_norm": 1.329091191291809, | |
| "learning_rate": 4.226526077841448e-05, | |
| "loss": 14.5635, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 0.28, | |
| "eval_loss": 3.810391902923584, | |
| "eval_runtime": 6.2132, | |
| "eval_samples_per_second": 321.895, | |
| "eval_steps_per_second": 20.118, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 0.281, | |
| "grad_norm": 1.2415845394134521, | |
| "learning_rate": 4.218996183961868e-05, | |
| "loss": 14.6589, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 0.282, | |
| "grad_norm": 1.3501887321472168, | |
| "learning_rate": 4.2115063925345885e-05, | |
| "loss": 15.0743, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 0.283, | |
| "grad_norm": 0.9185730218887329, | |
| "learning_rate": 4.204056348858339e-05, | |
| "loss": 15.1358, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 0.284, | |
| "grad_norm": 1.2723404169082642, | |
| "learning_rate": 4.196645702608569e-05, | |
| "loss": 14.9579, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 0.285, | |
| "grad_norm": 1.2337239980697632, | |
| "learning_rate": 4.18927410776826e-05, | |
| "loss": 15.1503, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 0.285, | |
| "eval_loss": 3.800377607345581, | |
| "eval_runtime": 10.3739, | |
| "eval_samples_per_second": 192.791, | |
| "eval_steps_per_second": 12.049, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 0.286, | |
| "grad_norm": 1.2774711847305298, | |
| "learning_rate": 4.181941222560067e-05, | |
| "loss": 15.5372, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 0.287, | |
| "grad_norm": 1.3295443058013916, | |
| "learning_rate": 4.1746467093797576e-05, | |
| "loss": 14.8642, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 0.288, | |
| "grad_norm": 1.6138548851013184, | |
| "learning_rate": 4.167390234730933e-05, | |
| "loss": 14.1073, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 0.289, | |
| "grad_norm": 1.4765127897262573, | |
| "learning_rate": 4.1601714691609825e-05, | |
| "loss": 15.0385, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 0.29, | |
| "grad_norm": 1.415253758430481, | |
| "learning_rate": 4.152990087198261e-05, | |
| "loss": 14.8696, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 0.29, | |
| "eval_loss": 3.7786402702331543, | |
| "eval_runtime": 6.24, | |
| "eval_samples_per_second": 320.514, | |
| "eval_steps_per_second": 20.032, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 0.291, | |
| "grad_norm": 1.3082252740859985, | |
| "learning_rate": 4.145845767290457e-05, | |
| "loss": 14.56, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 0.292, | |
| "grad_norm": 1.446778416633606, | |
| "learning_rate": 4.1387381917441095e-05, | |
| "loss": 14.8571, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 0.293, | |
| "grad_norm": 1.2211068868637085, | |
| "learning_rate": 4.131667046665284e-05, | |
| "loss": 14.6906, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 0.294, | |
| "grad_norm": 1.1490166187286377, | |
| "learning_rate": 4.1246320219013365e-05, | |
| "loss": 14.5224, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 0.295, | |
| "grad_norm": 0.9107897281646729, | |
| "learning_rate": 4.117632810983782e-05, | |
| "loss": 14.4431, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 0.295, | |
| "eval_loss": 3.7727034091949463, | |
| "eval_runtime": 6.217, | |
| "eval_samples_per_second": 321.698, | |
| "eval_steps_per_second": 20.106, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 0.296, | |
| "grad_norm": 1.0651347637176514, | |
| "learning_rate": 4.1106691110722134e-05, | |
| "loss": 13.9779, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 0.297, | |
| "grad_norm": 1.1543402671813965, | |
| "learning_rate": 4.10374062289927e-05, | |
| "loss": 14.586, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 0.298, | |
| "grad_norm": 1.2242450714111328, | |
| "learning_rate": 4.096847050716615e-05, | |
| "loss": 14.2348, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 0.299, | |
| "grad_norm": 1.3821468353271484, | |
| "learning_rate": 4.089988102241916e-05, | |
| "loss": 14.2154, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 0.3, | |
| "grad_norm": 1.4441609382629395, | |
| "learning_rate": 4.083163488606789e-05, | |
| "loss": 14.9342, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.3, | |
| "eval_loss": 3.770812749862671, | |
| "eval_runtime": 6.2111, | |
| "eval_samples_per_second": 322.006, | |
| "eval_steps_per_second": 20.125, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.301, | |
| "grad_norm": 1.5218119621276855, | |
| "learning_rate": 4.076372924305703e-05, | |
| "loss": 14.6337, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 0.302, | |
| "grad_norm": 1.123818039894104, | |
| "learning_rate": 4.0696161271458113e-05, | |
| "loss": 14.4631, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 0.303, | |
| "grad_norm": 1.3742114305496216, | |
| "learning_rate": 4.0628928181976975e-05, | |
| "loss": 15.2754, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 0.304, | |
| "grad_norm": 1.4702446460723877, | |
| "learning_rate": 4.0562027217470115e-05, | |
| "loss": 15.2532, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 0.305, | |
| "grad_norm": 1.0198601484298706, | |
| "learning_rate": 4.0495455652469793e-05, | |
| "loss": 15.0958, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 0.305, | |
| "eval_loss": 3.7610201835632324, | |
| "eval_runtime": 6.2419, | |
| "eval_samples_per_second": 320.415, | |
| "eval_steps_per_second": 20.026, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 0.306, | |
| "grad_norm": 1.2997148036956787, | |
| "learning_rate": 4.0429210792717695e-05, | |
| "loss": 15.122, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 0.307, | |
| "grad_norm": 1.367250919342041, | |
| "learning_rate": 4.03632899747069e-05, | |
| "loss": 14.633, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 0.308, | |
| "grad_norm": 1.1207269430160522, | |
| "learning_rate": 4.0297690565232124e-05, | |
| "loss": 14.3819, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 0.309, | |
| "grad_norm": 1.5955203771591187, | |
| "learning_rate": 4.0232409960947924e-05, | |
| "loss": 14.4362, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 0.31, | |
| "grad_norm": 1.3510704040527344, | |
| "learning_rate": 4.0167445587934724e-05, | |
| "loss": 14.9327, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 0.31, | |
| "eval_loss": 3.744568347930908, | |
| "eval_runtime": 6.2372, | |
| "eval_samples_per_second": 320.657, | |
| "eval_steps_per_second": 20.041, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 0.311, | |
| "grad_norm": 1.2507836818695068, | |
| "learning_rate": 4.0102794901272596e-05, | |
| "loss": 14.4538, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 0.312, | |
| "grad_norm": 1.300366997718811, | |
| "learning_rate": 4.00384553846225e-05, | |
| "loss": 14.086, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 0.313, | |
| "grad_norm": 1.5376310348510742, | |
| "learning_rate": 3.997442454981493e-05, | |
| "loss": 14.7837, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 0.314, | |
| "grad_norm": 1.0232264995574951, | |
| "learning_rate": 3.991069993644577e-05, | |
| "loss": 14.8787, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 0.315, | |
| "grad_norm": 1.519584059715271, | |
| "learning_rate": 3.984727911147916e-05, | |
| "loss": 15.0554, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 0.315, | |
| "eval_loss": 3.735930919647217, | |
| "eval_runtime": 6.2326, | |
| "eval_samples_per_second": 320.894, | |
| "eval_steps_per_second": 20.056, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 0.316, | |
| "grad_norm": 1.5088673830032349, | |
| "learning_rate": 3.978415966885739e-05, | |
| "loss": 15.0307, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 0.317, | |
| "grad_norm": 1.2005739212036133, | |
| "learning_rate": 3.972133922911742e-05, | |
| "loss": 14.6702, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 0.318, | |
| "grad_norm": 1.2146950960159302, | |
| "learning_rate": 3.9658815439014244e-05, | |
| "loss": 15.1964, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 0.319, | |
| "grad_norm": 1.1482326984405518, | |
| "learning_rate": 3.959658597115049e-05, | |
| "loss": 15.2801, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "grad_norm": 0.9639500975608826, | |
| "learning_rate": 3.953464852361267e-05, | |
| "loss": 14.9487, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "eval_loss": 3.7448246479034424, | |
| "eval_runtime": 6.4314, | |
| "eval_samples_per_second": 310.973, | |
| "eval_steps_per_second": 19.436, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 0.321, | |
| "grad_norm": 1.2903177738189697, | |
| "learning_rate": 3.9473000819613454e-05, | |
| "loss": 14.7958, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 0.322, | |
| "grad_norm": 1.5427738428115845, | |
| "learning_rate": 3.941164060714018e-05, | |
| "loss": 14.3846, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 0.323, | |
| "grad_norm": 1.1496590375900269, | |
| "learning_rate": 3.9350565658609336e-05, | |
| "loss": 14.7951, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 0.324, | |
| "grad_norm": 1.3798539638519287, | |
| "learning_rate": 3.928977377052684e-05, | |
| "loss": 14.4979, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 0.325, | |
| "grad_norm": 1.2108612060546875, | |
| "learning_rate": 3.922926276315421e-05, | |
| "loss": 14.7947, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 0.325, | |
| "eval_loss": 3.73313307762146, | |
| "eval_runtime": 6.239, | |
| "eval_samples_per_second": 320.563, | |
| "eval_steps_per_second": 20.035, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 0.326, | |
| "grad_norm": 1.2491366863250732, | |
| "learning_rate": 3.916903048018023e-05, | |
| "loss": 15.05, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 0.327, | |
| "grad_norm": 1.5242091417312622, | |
| "learning_rate": 3.910907478839825e-05, | |
| "loss": 15.0895, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 0.328, | |
| "grad_norm": 1.006120204925537, | |
| "learning_rate": 3.904939357738885e-05, | |
| "loss": 15.0252, | |
| "step": 3280 | |
| }, | |
| { | |
| "epoch": 0.329, | |
| "grad_norm": 1.4161890745162964, | |
| "learning_rate": 3.8989984759207835e-05, | |
| "loss": 14.7367, | |
| "step": 3290 | |
| }, | |
| { | |
| "epoch": 0.33, | |
| "grad_norm": 1.2856372594833374, | |
| "learning_rate": 3.893084626807942e-05, | |
| "loss": 14.7509, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 0.33, | |
| "eval_loss": 3.728989601135254, | |
| "eval_runtime": 6.2598, | |
| "eval_samples_per_second": 319.499, | |
| "eval_steps_per_second": 19.969, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 0.331, | |
| "grad_norm": 1.3783438205718994, | |
| "learning_rate": 3.887197606009451e-05, | |
| "loss": 14.5589, | |
| "step": 3310 | |
| }, | |
| { | |
| "epoch": 0.332, | |
| "grad_norm": 1.2106647491455078, | |
| "learning_rate": 3.881337211291406e-05, | |
| "loss": 14.1562, | |
| "step": 3320 | |
| }, | |
| { | |
| "epoch": 0.333, | |
| "grad_norm": 1.212920069694519, | |
| "learning_rate": 3.875503242547721e-05, | |
| "loss": 14.6982, | |
| "step": 3330 | |
| }, | |
| { | |
| "epoch": 0.334, | |
| "grad_norm": 1.0120047330856323, | |
| "learning_rate": 3.869695501771434e-05, | |
| "loss": 14.6513, | |
| "step": 3340 | |
| }, | |
| { | |
| "epoch": 0.335, | |
| "grad_norm": 1.1318042278289795, | |
| "learning_rate": 3.8639137930264805e-05, | |
| "loss": 14.9368, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 0.335, | |
| "eval_loss": 3.7159037590026855, | |
| "eval_runtime": 6.2602, | |
| "eval_samples_per_second": 319.479, | |
| "eval_steps_per_second": 19.967, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 0.336, | |
| "grad_norm": 1.1732110977172852, | |
| "learning_rate": 3.858157922419927e-05, | |
| "loss": 14.6541, | |
| "step": 3360 | |
| }, | |
| { | |
| "epoch": 0.337, | |
| "grad_norm": 1.2244794368743896, | |
| "learning_rate": 3.8524276980746626e-05, | |
| "loss": 14.7344, | |
| "step": 3370 | |
| }, | |
| { | |
| "epoch": 0.338, | |
| "grad_norm": 1.1306333541870117, | |
| "learning_rate": 3.8467229301025334e-05, | |
| "loss": 14.5686, | |
| "step": 3380 | |
| }, | |
| { | |
| "epoch": 0.339, | |
| "grad_norm": 1.3868407011032104, | |
| "learning_rate": 3.8410434305779104e-05, | |
| "loss": 14.664, | |
| "step": 3390 | |
| }, | |
| { | |
| "epoch": 0.34, | |
| "grad_norm": 1.124047875404358, | |
| "learning_rate": 3.835389013511689e-05, | |
| "loss": 14.7722, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 0.34, | |
| "eval_loss": 3.7149546146392822, | |
| "eval_runtime": 7.4746, | |
| "eval_samples_per_second": 267.574, | |
| "eval_steps_per_second": 16.723, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 0.341, | |
| "grad_norm": 1.340169072151184, | |
| "learning_rate": 3.829759494825711e-05, | |
| "loss": 14.5645, | |
| "step": 3410 | |
| }, | |
| { | |
| "epoch": 0.342, | |
| "grad_norm": 1.1147572994232178, | |
| "learning_rate": 3.82415469232759e-05, | |
| "loss": 14.568, | |
| "step": 3420 | |
| }, | |
| { | |
| "epoch": 0.343, | |
| "grad_norm": 0.9722641110420227, | |
| "learning_rate": 3.8185744256859485e-05, | |
| "loss": 14.4606, | |
| "step": 3430 | |
| }, | |
| { | |
| "epoch": 0.344, | |
| "grad_norm": 1.1598691940307617, | |
| "learning_rate": 3.813018516406046e-05, | |
| "loss": 14.4404, | |
| "step": 3440 | |
| }, | |
| { | |
| "epoch": 0.345, | |
| "grad_norm": 1.1491451263427734, | |
| "learning_rate": 3.807486787805802e-05, | |
| "loss": 14.3155, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 0.345, | |
| "eval_loss": 3.7217018604278564, | |
| "eval_runtime": 10.2678, | |
| "eval_samples_per_second": 194.784, | |
| "eval_steps_per_second": 12.174, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 0.346, | |
| "grad_norm": 1.2273186445236206, | |
| "learning_rate": 3.801979064992194e-05, | |
| "loss": 14.5827, | |
| "step": 3460 | |
| }, | |
| { | |
| "epoch": 0.347, | |
| "grad_norm": 1.2851728200912476, | |
| "learning_rate": 3.796495174838033e-05, | |
| "loss": 14.7868, | |
| "step": 3470 | |
| }, | |
| { | |
| "epoch": 0.348, | |
| "grad_norm": 1.3409245014190674, | |
| "learning_rate": 3.7910349459591094e-05, | |
| "loss": 14.4866, | |
| "step": 3480 | |
| }, | |
| { | |
| "epoch": 0.349, | |
| "grad_norm": 1.512802004814148, | |
| "learning_rate": 3.785598208691693e-05, | |
| "loss": 14.3369, | |
| "step": 3490 | |
| }, | |
| { | |
| "epoch": 0.35, | |
| "grad_norm": 1.238440752029419, | |
| "learning_rate": 3.780184795070394e-05, | |
| "loss": 14.376, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.35, | |
| "eval_loss": 3.7125015258789062, | |
| "eval_runtime": 6.2266, | |
| "eval_samples_per_second": 321.203, | |
| "eval_steps_per_second": 20.075, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.351, | |
| "grad_norm": 1.2165943384170532, | |
| "learning_rate": 3.7747945388063626e-05, | |
| "loss": 14.7182, | |
| "step": 3510 | |
| }, | |
| { | |
| "epoch": 0.352, | |
| "grad_norm": 1.1595171689987183, | |
| "learning_rate": 3.7694272752658386e-05, | |
| "loss": 14.8565, | |
| "step": 3520 | |
| }, | |
| { | |
| "epoch": 0.353, | |
| "grad_norm": 1.294655442237854, | |
| "learning_rate": 3.764082841449032e-05, | |
| "loss": 15.2285, | |
| "step": 3530 | |
| }, | |
| { | |
| "epoch": 0.354, | |
| "grad_norm": 1.2549623250961304, | |
| "learning_rate": 3.758761075969328e-05, | |
| "loss": 14.7526, | |
| "step": 3540 | |
| }, | |
| { | |
| "epoch": 0.355, | |
| "grad_norm": 1.194077968597412, | |
| "learning_rate": 3.7534618190328195e-05, | |
| "loss": 14.485, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 0.355, | |
| "eval_loss": 3.7080776691436768, | |
| "eval_runtime": 6.2647, | |
| "eval_samples_per_second": 319.25, | |
| "eval_steps_per_second": 19.953, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 0.356, | |
| "grad_norm": 1.0828840732574463, | |
| "learning_rate": 3.748184912418159e-05, | |
| "loss": 14.3297, | |
| "step": 3560 | |
| }, | |
| { | |
| "epoch": 0.357, | |
| "grad_norm": 1.0104490518569946, | |
| "learning_rate": 3.742930199456709e-05, | |
| "loss": 14.2306, | |
| "step": 3570 | |
| }, | |
| { | |
| "epoch": 0.358, | |
| "grad_norm": 1.086410641670227, | |
| "learning_rate": 3.7376975250130133e-05, | |
| "loss": 14.0265, | |
| "step": 3580 | |
| }, | |
| { | |
| "epoch": 0.359, | |
| "grad_norm": 1.0463649034500122, | |
| "learning_rate": 3.732486735465553e-05, | |
| "loss": 13.999, | |
| "step": 3590 | |
| }, | |
| { | |
| "epoch": 0.36, | |
| "grad_norm": 1.2992103099822998, | |
| "learning_rate": 3.727297678687811e-05, | |
| "loss": 14.3544, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 0.36, | |
| "eval_loss": 3.7135140895843506, | |
| "eval_runtime": 6.2372, | |
| "eval_samples_per_second": 320.659, | |
| "eval_steps_per_second": 20.041, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 0.361, | |
| "grad_norm": 1.1995879411697388, | |
| "learning_rate": 3.7221302040296116e-05, | |
| "loss": 14.5238, | |
| "step": 3610 | |
| }, | |
| { | |
| "epoch": 0.362, | |
| "grad_norm": 1.3045077323913574, | |
| "learning_rate": 3.7169841622987545e-05, | |
| "loss": 14.9165, | |
| "step": 3620 | |
| }, | |
| { | |
| "epoch": 0.363, | |
| "grad_norm": 1.0523598194122314, | |
| "learning_rate": 3.711859405742924e-05, | |
| "loss": 14.4459, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 0.364, | |
| "grad_norm": 1.0935602188110352, | |
| "learning_rate": 3.706755788031864e-05, | |
| "loss": 14.0357, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 0.365, | |
| "grad_norm": 1.342834711074829, | |
| "learning_rate": 3.701673164239839e-05, | |
| "loss": 14.2318, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 0.365, | |
| "eval_loss": 3.708907127380371, | |
| "eval_runtime": 6.2576, | |
| "eval_samples_per_second": 319.613, | |
| "eval_steps_per_second": 19.976, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 0.366, | |
| "grad_norm": 1.0739957094192505, | |
| "learning_rate": 3.696611390828343e-05, | |
| "loss": 14.4032, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 0.367, | |
| "grad_norm": 1.2032370567321777, | |
| "learning_rate": 3.691570325629073e-05, | |
| "loss": 14.6113, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 0.368, | |
| "grad_norm": 1.155275583267212, | |
| "learning_rate": 3.6865498278271596e-05, | |
| "loss": 14.2557, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 0.369, | |
| "grad_norm": 1.1695798635482788, | |
| "learning_rate": 3.681549757944646e-05, | |
| "loss": 14.816, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 0.37, | |
| "grad_norm": 1.3971259593963623, | |
| "learning_rate": 3.676569977824213e-05, | |
| "loss": 14.6923, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 0.37, | |
| "eval_loss": 3.6910042762756348, | |
| "eval_runtime": 6.2304, | |
| "eval_samples_per_second": 321.008, | |
| "eval_steps_per_second": 20.063, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 0.371, | |
| "grad_norm": 1.255376935005188, | |
| "learning_rate": 3.6716103506131446e-05, | |
| "loss": 14.0136, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 0.372, | |
| "grad_norm": 1.3371446132659912, | |
| "learning_rate": 3.6666707407475314e-05, | |
| "loss": 14.5684, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 0.373, | |
| "grad_norm": 1.2590174674987793, | |
| "learning_rate": 3.661751013936708e-05, | |
| "loss": 14.4847, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 0.374, | |
| "grad_norm": 1.210468053817749, | |
| "learning_rate": 3.656851037147919e-05, | |
| "loss": 14.6346, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 0.375, | |
| "grad_norm": 1.449440836906433, | |
| "learning_rate": 3.65197067859121e-05, | |
| "loss": 14.9902, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 0.375, | |
| "eval_loss": 3.7001209259033203, | |
| "eval_runtime": 6.3384, | |
| "eval_samples_per_second": 315.535, | |
| "eval_steps_per_second": 19.721, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 0.376, | |
| "grad_norm": 1.287975549697876, | |
| "learning_rate": 3.6471098077045404e-05, | |
| "loss": 15.1399, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 0.377, | |
| "grad_norm": 1.300921082496643, | |
| "learning_rate": 3.642268295139107e-05, | |
| "loss": 14.7123, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 0.378, | |
| "grad_norm": 1.0219051837921143, | |
| "learning_rate": 3.6374460127448995e-05, | |
| "loss": 14.4742, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 0.379, | |
| "grad_norm": 1.0501158237457275, | |
| "learning_rate": 3.632642833556441e-05, | |
| "loss": 14.4047, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 0.38, | |
| "grad_norm": 1.4280771017074585, | |
| "learning_rate": 3.627858631778756e-05, | |
| "loss": 14.627, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 0.38, | |
| "eval_loss": 3.6846299171447754, | |
| "eval_runtime": 6.2689, | |
| "eval_samples_per_second": 319.034, | |
| "eval_steps_per_second": 19.94, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 0.381, | |
| "grad_norm": 1.6403334140777588, | |
| "learning_rate": 3.623093282773527e-05, | |
| "loss": 14.751, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 0.382, | |
| "grad_norm": 1.3171409368515015, | |
| "learning_rate": 3.6183466630454636e-05, | |
| "loss": 14.4652, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 0.383, | |
| "grad_norm": 1.3603215217590332, | |
| "learning_rate": 3.6136186502288535e-05, | |
| "loss": 14.1708, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 0.384, | |
| "grad_norm": 1.2372150421142578, | |
| "learning_rate": 3.608909123074314e-05, | |
| "loss": 14.2904, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 0.385, | |
| "grad_norm": 1.4466358423233032, | |
| "learning_rate": 3.6042179614357375e-05, | |
| "loss": 14.2487, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 0.385, | |
| "eval_loss": 3.687807083129883, | |
| "eval_runtime": 6.2515, | |
| "eval_samples_per_second": 319.922, | |
| "eval_steps_per_second": 19.995, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 0.386, | |
| "grad_norm": 1.2979004383087158, | |
| "learning_rate": 3.5995450462574126e-05, | |
| "loss": 14.723, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 0.387, | |
| "grad_norm": 1.2666685581207275, | |
| "learning_rate": 3.594890259561335e-05, | |
| "loss": 14.6014, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 0.388, | |
| "grad_norm": 1.3407564163208008, | |
| "learning_rate": 3.5902534844346976e-05, | |
| "loss": 14.5131, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 0.389, | |
| "grad_norm": 1.1121426820755005, | |
| "learning_rate": 3.5856346050175565e-05, | |
| "loss": 14.7588, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 0.39, | |
| "grad_norm": 0.9877298474311829, | |
| "learning_rate": 3.581033506490671e-05, | |
| "loss": 14.5174, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 0.39, | |
| "eval_loss": 3.682448148727417, | |
| "eval_runtime": 6.2568, | |
| "eval_samples_per_second": 319.654, | |
| "eval_steps_per_second": 19.978, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 0.391, | |
| "grad_norm": 1.2272289991378784, | |
| "learning_rate": 3.576450075063519e-05, | |
| "loss": 14.6118, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 0.392, | |
| "grad_norm": 1.2810298204421997, | |
| "learning_rate": 3.571884197962469e-05, | |
| "loss": 14.4994, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 0.393, | |
| "grad_norm": 1.2651106119155884, | |
| "learning_rate": 3.567335763419138e-05, | |
| "loss": 14.7608, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 0.394, | |
| "grad_norm": 1.2401210069656372, | |
| "learning_rate": 3.562804660658888e-05, | |
| "loss": 14.2945, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 0.395, | |
| "grad_norm": 1.401061773300171, | |
| "learning_rate": 3.5582907798895035e-05, | |
| "loss": 14.6083, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 0.395, | |
| "eval_loss": 3.66426420211792, | |
| "eval_runtime": 6.2582, | |
| "eval_samples_per_second": 319.58, | |
| "eval_steps_per_second": 19.974, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 0.396, | |
| "grad_norm": 1.347406268119812, | |
| "learning_rate": 3.55379401229001e-05, | |
| "loss": 14.2524, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 0.397, | |
| "grad_norm": 1.1248286962509155, | |
| "learning_rate": 3.5493142499996666e-05, | |
| "loss": 14.7505, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 0.398, | |
| "grad_norm": 1.3772094249725342, | |
| "learning_rate": 3.544851386107085e-05, | |
| "loss": 14.7012, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 0.399, | |
| "grad_norm": 1.454031229019165, | |
| "learning_rate": 3.540405314639526e-05, | |
| "loss": 14.6776, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "grad_norm": 1.1234098672866821, | |
| "learning_rate": 3.535975930552322e-05, | |
| "loss": 14.6502, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "eval_loss": 3.6687095165252686, | |
| "eval_runtime": 6.2384, | |
| "eval_samples_per_second": 320.595, | |
| "eval_steps_per_second": 20.037, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.401, | |
| "grad_norm": 1.2931911945343018, | |
| "learning_rate": 3.531563129718458e-05, | |
| "loss": 14.8039, | |
| "step": 4010 | |
| }, | |
| { | |
| "epoch": 0.402, | |
| "grad_norm": 1.2497082948684692, | |
| "learning_rate": 3.527166808918287e-05, | |
| "loss": 14.8149, | |
| "step": 4020 | |
| }, | |
| { | |
| "epoch": 0.403, | |
| "grad_norm": 1.4773260354995728, | |
| "learning_rate": 3.522786865829391e-05, | |
| "loss": 14.7316, | |
| "step": 4030 | |
| }, | |
| { | |
| "epoch": 0.404, | |
| "grad_norm": 1.3319817781448364, | |
| "learning_rate": 3.51842319901658e-05, | |
| "loss": 14.2236, | |
| "step": 4040 | |
| }, | |
| { | |
| "epoch": 0.405, | |
| "grad_norm": 1.094673991203308, | |
| "learning_rate": 3.5140757079220216e-05, | |
| "loss": 14.4414, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 0.405, | |
| "eval_loss": 3.6671693325042725, | |
| "eval_runtime": 6.278, | |
| "eval_samples_per_second": 318.575, | |
| "eval_steps_per_second": 19.911, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 0.406, | |
| "grad_norm": 1.2040002346038818, | |
| "learning_rate": 3.509744292855511e-05, | |
| "loss": 14.3051, | |
| "step": 4060 | |
| }, | |
| { | |
| "epoch": 0.407, | |
| "grad_norm": 1.0459996461868286, | |
| "learning_rate": 3.505428854984869e-05, | |
| "loss": 14.4803, | |
| "step": 4070 | |
| }, | |
| { | |
| "epoch": 0.408, | |
| "grad_norm": 1.1268916130065918, | |
| "learning_rate": 3.5011292963264705e-05, | |
| "loss": 14.8989, | |
| "step": 4080 | |
| }, | |
| { | |
| "epoch": 0.409, | |
| "grad_norm": 1.1444742679595947, | |
| "learning_rate": 3.496845519735901e-05, | |
| "loss": 14.7764, | |
| "step": 4090 | |
| }, | |
| { | |
| "epoch": 0.41, | |
| "grad_norm": 1.5272349119186401, | |
| "learning_rate": 3.492577428898734e-05, | |
| "loss": 14.3487, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 0.41, | |
| "eval_loss": 3.6532514095306396, | |
| "eval_runtime": 6.2351, | |
| "eval_samples_per_second": 320.767, | |
| "eval_steps_per_second": 20.048, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 0.411, | |
| "grad_norm": 1.4240920543670654, | |
| "learning_rate": 3.4883249283214416e-05, | |
| "loss": 14.7764, | |
| "step": 4110 | |
| }, | |
| { | |
| "epoch": 0.412, | |
| "grad_norm": 1.325265884399414, | |
| "learning_rate": 3.4840879233224196e-05, | |
| "loss": 14.2673, | |
| "step": 4120 | |
| }, | |
| { | |
| "epoch": 0.413, | |
| "grad_norm": 1.321703314781189, | |
| "learning_rate": 3.4798663200231265e-05, | |
| "loss": 13.9183, | |
| "step": 4130 | |
| }, | |
| { | |
| "epoch": 0.414, | |
| "grad_norm": 1.1967962980270386, | |
| "learning_rate": 3.475660025339355e-05, | |
| "loss": 14.1222, | |
| "step": 4140 | |
| }, | |
| { | |
| "epoch": 0.415, | |
| "grad_norm": 1.0946017503738403, | |
| "learning_rate": 3.471468946972612e-05, | |
| "loss": 14.6894, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 0.415, | |
| "eval_loss": 3.6469974517822266, | |
| "eval_runtime": 6.2794, | |
| "eval_samples_per_second": 318.5, | |
| "eval_steps_per_second": 19.906, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 0.416, | |
| "grad_norm": 1.444520354270935, | |
| "learning_rate": 3.467292993401603e-05, | |
| "loss": 14.8196, | |
| "step": 4160 | |
| }, | |
| { | |
| "epoch": 0.417, | |
| "grad_norm": 1.612980842590332, | |
| "learning_rate": 3.4631320738738494e-05, | |
| "loss": 13.9702, | |
| "step": 4170 | |
| }, | |
| { | |
| "epoch": 0.418, | |
| "grad_norm": 1.541488528251648, | |
| "learning_rate": 3.458986098397395e-05, | |
| "loss": 14.4179, | |
| "step": 4180 | |
| }, | |
| { | |
| "epoch": 0.419, | |
| "grad_norm": 1.2451997995376587, | |
| "learning_rate": 3.45485497773264e-05, | |
| "loss": 14.3002, | |
| "step": 4190 | |
| }, | |
| { | |
| "epoch": 0.42, | |
| "grad_norm": 1.0385589599609375, | |
| "learning_rate": 3.450738623384265e-05, | |
| "loss": 14.2547, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 0.42, | |
| "eval_loss": 3.649686098098755, | |
| "eval_runtime": 6.2648, | |
| "eval_samples_per_second": 319.246, | |
| "eval_steps_per_second": 19.953, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 0.421, | |
| "grad_norm": 1.3172301054000854, | |
| "learning_rate": 3.4466369475932744e-05, | |
| "loss": 14.6115, | |
| "step": 4210 | |
| }, | |
| { | |
| "epoch": 0.422, | |
| "grad_norm": 1.1311509609222412, | |
| "learning_rate": 3.442549863329138e-05, | |
| "loss": 14.6898, | |
| "step": 4220 | |
| }, | |
| { | |
| "epoch": 0.423, | |
| "grad_norm": 1.1822139024734497, | |
| "learning_rate": 3.438477284282031e-05, | |
| "loss": 14.5849, | |
| "step": 4230 | |
| }, | |
| { | |
| "epoch": 0.424, | |
| "grad_norm": 1.2009106874465942, | |
| "learning_rate": 3.4344191248551814e-05, | |
| "loss": 14.6419, | |
| "step": 4240 | |
| }, | |
| { | |
| "epoch": 0.425, | |
| "grad_norm": 1.336679458618164, | |
| "learning_rate": 3.4303753001573164e-05, | |
| "loss": 14.5884, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 0.425, | |
| "eval_loss": 3.650291919708252, | |
| "eval_runtime": 6.2357, | |
| "eval_samples_per_second": 320.734, | |
| "eval_steps_per_second": 20.046, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 0.426, | |
| "grad_norm": 1.2796190977096558, | |
| "learning_rate": 3.426345725995197e-05, | |
| "loss": 13.906, | |
| "step": 4260 | |
| }, | |
| { | |
| "epoch": 0.427, | |
| "grad_norm": 1.253543496131897, | |
| "learning_rate": 3.422330318866262e-05, | |
| "loss": 14.2191, | |
| "step": 4270 | |
| }, | |
| { | |
| "epoch": 0.428, | |
| "grad_norm": 1.1889468431472778, | |
| "learning_rate": 3.4183289959513575e-05, | |
| "loss": 14.1989, | |
| "step": 4280 | |
| }, | |
| { | |
| "epoch": 0.429, | |
| "grad_norm": 1.2647627592086792, | |
| "learning_rate": 3.414341675107563e-05, | |
| "loss": 14.2794, | |
| "step": 4290 | |
| }, | |
| { | |
| "epoch": 0.43, | |
| "grad_norm": 1.3583110570907593, | |
| "learning_rate": 3.4103682748611054e-05, | |
| "loss": 14.8572, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 0.43, | |
| "eval_loss": 3.6479427814483643, | |
| "eval_runtime": 6.2542, | |
| "eval_samples_per_second": 319.787, | |
| "eval_steps_per_second": 19.987, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 0.431, | |
| "grad_norm": 1.3158838748931885, | |
| "learning_rate": 3.4064087144003755e-05, | |
| "loss": 14.4486, | |
| "step": 4310 | |
| }, | |
| { | |
| "epoch": 0.432, | |
| "grad_norm": 1.2332990169525146, | |
| "learning_rate": 3.402462913569015e-05, | |
| "loss": 14.2708, | |
| "step": 4320 | |
| }, | |
| { | |
| "epoch": 0.433, | |
| "grad_norm": 1.1926401853561401, | |
| "learning_rate": 3.3985307928591074e-05, | |
| "loss": 14.385, | |
| "step": 4330 | |
| }, | |
| { | |
| "epoch": 0.434, | |
| "grad_norm": 1.3099074363708496, | |
| "learning_rate": 3.3946122734044455e-05, | |
| "loss": 14.6973, | |
| "step": 4340 | |
| }, | |
| { | |
| "epoch": 0.435, | |
| "grad_norm": 1.0637519359588623, | |
| "learning_rate": 3.390707276973892e-05, | |
| "loss": 14.905, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 0.435, | |
| "eval_loss": 3.638732433319092, | |
| "eval_runtime": 6.2436, | |
| "eval_samples_per_second": 320.326, | |
| "eval_steps_per_second": 20.02, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 0.436, | |
| "grad_norm": 1.257131576538086, | |
| "learning_rate": 3.3868157259648156e-05, | |
| "loss": 14.6147, | |
| "step": 4360 | |
| }, | |
| { | |
| "epoch": 0.437, | |
| "grad_norm": 1.0288323163986206, | |
| "learning_rate": 3.382937543396614e-05, | |
| "loss": 14.6657, | |
| "step": 4370 | |
| }, | |
| { | |
| "epoch": 0.438, | |
| "grad_norm": 1.339852213859558, | |
| "learning_rate": 3.379072652904321e-05, | |
| "loss": 14.3125, | |
| "step": 4380 | |
| }, | |
| { | |
| "epoch": 0.439, | |
| "grad_norm": 1.3522120714187622, | |
| "learning_rate": 3.375220978732289e-05, | |
| "loss": 13.5858, | |
| "step": 4390 | |
| }, | |
| { | |
| "epoch": 0.44, | |
| "grad_norm": 1.2488036155700684, | |
| "learning_rate": 3.371382445727951e-05, | |
| "loss": 14.0461, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 0.44, | |
| "eval_loss": 3.6404318809509277, | |
| "eval_runtime": 6.2486, | |
| "eval_samples_per_second": 320.074, | |
| "eval_steps_per_second": 20.005, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 0.441, | |
| "grad_norm": 1.4024121761322021, | |
| "learning_rate": 3.3675569793356654e-05, | |
| "loss": 14.3792, | |
| "step": 4410 | |
| }, | |
| { | |
| "epoch": 0.442, | |
| "grad_norm": 1.574225664138794, | |
| "learning_rate": 3.36374450559063e-05, | |
| "loss": 14.2403, | |
| "step": 4420 | |
| }, | |
| { | |
| "epoch": 0.443, | |
| "grad_norm": 1.3503550291061401, | |
| "learning_rate": 3.359944951112878e-05, | |
| "loss": 14.2765, | |
| "step": 4430 | |
| }, | |
| { | |
| "epoch": 0.444, | |
| "grad_norm": 1.3308185338974, | |
| "learning_rate": 3.356158243101345e-05, | |
| "loss": 14.3184, | |
| "step": 4440 | |
| }, | |
| { | |
| "epoch": 0.445, | |
| "grad_norm": 1.4911550283432007, | |
| "learning_rate": 3.3523843093280096e-05, | |
| "loss": 13.6826, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 0.445, | |
| "eval_loss": 3.6506519317626953, | |
| "eval_runtime": 6.2039, | |
| "eval_samples_per_second": 322.377, | |
| "eval_steps_per_second": 20.149, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 0.446, | |
| "grad_norm": 1.5103703737258911, | |
| "learning_rate": 3.348623078132111e-05, | |
| "loss": 14.1831, | |
| "step": 4460 | |
| }, | |
| { | |
| "epoch": 0.447, | |
| "grad_norm": 1.20027756690979, | |
| "learning_rate": 3.344874478414426e-05, | |
| "loss": 14.0568, | |
| "step": 4470 | |
| }, | |
| { | |
| "epoch": 0.448, | |
| "grad_norm": 1.173339605331421, | |
| "learning_rate": 3.341138439631633e-05, | |
| "loss": 14.4583, | |
| "step": 4480 | |
| }, | |
| { | |
| "epoch": 0.449, | |
| "grad_norm": 1.2948070764541626, | |
| "learning_rate": 3.337414891790731e-05, | |
| "loss": 14.4602, | |
| "step": 4490 | |
| }, | |
| { | |
| "epoch": 0.45, | |
| "grad_norm": 1.2212433815002441, | |
| "learning_rate": 3.3337037654435325e-05, | |
| "loss": 14.036, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 0.45, | |
| "eval_loss": 3.6365511417388916, | |
| "eval_runtime": 6.2415, | |
| "eval_samples_per_second": 320.433, | |
| "eval_steps_per_second": 20.027, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 0.451, | |
| "grad_norm": 1.335633397102356, | |
| "learning_rate": 3.330004991681224e-05, | |
| "loss": 13.7247, | |
| "step": 4510 | |
| }, | |
| { | |
| "epoch": 0.452, | |
| "grad_norm": 1.226434588432312, | |
| "learning_rate": 3.3263185021289925e-05, | |
| "loss": 14.2707, | |
| "step": 4520 | |
| }, | |
| { | |
| "epoch": 0.453, | |
| "grad_norm": 1.2056890726089478, | |
| "learning_rate": 3.322644228940717e-05, | |
| "loss": 14.7619, | |
| "step": 4530 | |
| }, | |
| { | |
| "epoch": 0.454, | |
| "grad_norm": 1.4296637773513794, | |
| "learning_rate": 3.318982104793721e-05, | |
| "loss": 14.6893, | |
| "step": 4540 | |
| }, | |
| { | |
| "epoch": 0.455, | |
| "grad_norm": 1.3162144422531128, | |
| "learning_rate": 3.3153320628835953e-05, | |
| "loss": 14.2959, | |
| "step": 4550 | |
| }, | |
| { | |
| "epoch": 0.455, | |
| "eval_loss": 3.630129337310791, | |
| "eval_runtime": 6.2099, | |
| "eval_samples_per_second": 322.065, | |
| "eval_steps_per_second": 20.129, | |
| "step": 4550 | |
| }, | |
| { | |
| "epoch": 0.456, | |
| "grad_norm": 1.3302292823791504, | |
| "learning_rate": 3.311694036919079e-05, | |
| "loss": 14.435, | |
| "step": 4560 | |
| }, | |
| { | |
| "epoch": 0.457, | |
| "grad_norm": 1.6379554271697998, | |
| "learning_rate": 3.308067961117001e-05, | |
| "loss": 14.5194, | |
| "step": 4570 | |
| }, | |
| { | |
| "epoch": 0.458, | |
| "grad_norm": 1.1413871049880981, | |
| "learning_rate": 3.3044537701972836e-05, | |
| "loss": 14.5356, | |
| "step": 4580 | |
| }, | |
| { | |
| "epoch": 0.459, | |
| "grad_norm": 1.333553671836853, | |
| "learning_rate": 3.30085139937801e-05, | |
| "loss": 13.9572, | |
| "step": 4590 | |
| }, | |
| { | |
| "epoch": 0.46, | |
| "grad_norm": 1.2290724515914917, | |
| "learning_rate": 3.2972607843705445e-05, | |
| "loss": 14.2627, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 0.46, | |
| "eval_loss": 3.626358985900879, | |
| "eval_runtime": 6.1856, | |
| "eval_samples_per_second": 323.334, | |
| "eval_steps_per_second": 20.208, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 0.461, | |
| "grad_norm": 1.2494893074035645, | |
| "learning_rate": 3.293681861374713e-05, | |
| "loss": 14.3398, | |
| "step": 4610 | |
| }, | |
| { | |
| "epoch": 0.462, | |
| "grad_norm": 1.3624249696731567, | |
| "learning_rate": 3.290114567074045e-05, | |
| "loss": 14.3332, | |
| "step": 4620 | |
| }, | |
| { | |
| "epoch": 0.463, | |
| "grad_norm": 1.1090985536575317, | |
| "learning_rate": 3.2865588386310646e-05, | |
| "loss": 13.7097, | |
| "step": 4630 | |
| }, | |
| { | |
| "epoch": 0.464, | |
| "grad_norm": 1.1444495916366577, | |
| "learning_rate": 3.2830146136826445e-05, | |
| "loss": 14.2853, | |
| "step": 4640 | |
| }, | |
| { | |
| "epoch": 0.465, | |
| "grad_norm": 1.2577887773513794, | |
| "learning_rate": 3.2794818303354126e-05, | |
| "loss": 14.1538, | |
| "step": 4650 | |
| }, | |
| { | |
| "epoch": 0.465, | |
| "eval_loss": 3.6183412075042725, | |
| "eval_runtime": 6.2157, | |
| "eval_samples_per_second": 321.764, | |
| "eval_steps_per_second": 20.11, | |
| "step": 4650 | |
| }, | |
| { | |
| "epoch": 0.466, | |
| "grad_norm": 1.1332851648330688, | |
| "learning_rate": 3.2759604271612094e-05, | |
| "loss": 13.2038, | |
| "step": 4660 | |
| }, | |
| { | |
| "epoch": 0.467, | |
| "grad_norm": 1.2543771266937256, | |
| "learning_rate": 3.272450343192603e-05, | |
| "loss": 14.1238, | |
| "step": 4670 | |
| }, | |
| { | |
| "epoch": 0.468, | |
| "grad_norm": 1.3030415773391724, | |
| "learning_rate": 3.2689515179184576e-05, | |
| "loss": 14.598, | |
| "step": 4680 | |
| }, | |
| { | |
| "epoch": 0.469, | |
| "grad_norm": 1.4264740943908691, | |
| "learning_rate": 3.265463891279551e-05, | |
| "loss": 14.4331, | |
| "step": 4690 | |
| }, | |
| { | |
| "epoch": 0.47, | |
| "grad_norm": 1.3160494565963745, | |
| "learning_rate": 3.2619874036642426e-05, | |
| "loss": 14.5275, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 0.47, | |
| "eval_loss": 3.6218349933624268, | |
| "eval_runtime": 6.246, | |
| "eval_samples_per_second": 320.205, | |
| "eval_steps_per_second": 20.013, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 0.471, | |
| "grad_norm": 1.3615264892578125, | |
| "learning_rate": 3.258521995904196e-05, | |
| "loss": 14.6488, | |
| "step": 4710 | |
| }, | |
| { | |
| "epoch": 0.472, | |
| "grad_norm": 1.199601173400879, | |
| "learning_rate": 3.2550676092701496e-05, | |
| "loss": 14.2667, | |
| "step": 4720 | |
| }, | |
| { | |
| "epoch": 0.473, | |
| "grad_norm": 1.1377754211425781, | |
| "learning_rate": 3.2516241854677356e-05, | |
| "loss": 14.5754, | |
| "step": 4730 | |
| }, | |
| { | |
| "epoch": 0.474, | |
| "grad_norm": 1.3825005292892456, | |
| "learning_rate": 3.248191666633348e-05, | |
| "loss": 14.6365, | |
| "step": 4740 | |
| }, | |
| { | |
| "epoch": 0.475, | |
| "grad_norm": 1.1313741207122803, | |
| "learning_rate": 3.244769995330059e-05, | |
| "loss": 14.2485, | |
| "step": 4750 | |
| }, | |
| { | |
| "epoch": 0.475, | |
| "eval_loss": 3.6164262294769287, | |
| "eval_runtime": 6.2202, | |
| "eval_samples_per_second": 321.531, | |
| "eval_steps_per_second": 20.096, | |
| "step": 4750 | |
| }, | |
| { | |
| "epoch": 0.476, | |
| "grad_norm": 1.176073431968689, | |
| "learning_rate": 3.241359114543582e-05, | |
| "loss": 14.2109, | |
| "step": 4760 | |
| }, | |
| { | |
| "epoch": 0.477, | |
| "grad_norm": 1.5828211307525635, | |
| "learning_rate": 3.237958967678283e-05, | |
| "loss": 14.7457, | |
| "step": 4770 | |
| }, | |
| { | |
| "epoch": 0.478, | |
| "grad_norm": 1.4503830671310425, | |
| "learning_rate": 3.234569498553233e-05, | |
| "loss": 14.3239, | |
| "step": 4780 | |
| }, | |
| { | |
| "epoch": 0.479, | |
| "grad_norm": 1.7589170932769775, | |
| "learning_rate": 3.231190651398314e-05, | |
| "loss": 14.0929, | |
| "step": 4790 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "grad_norm": 1.3697690963745117, | |
| "learning_rate": 3.227822370850359e-05, | |
| "loss": 14.5645, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "eval_loss": 3.614612579345703, | |
| "eval_runtime": 6.2212, | |
| "eval_samples_per_second": 321.483, | |
| "eval_steps_per_second": 20.093, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 0.481, | |
| "grad_norm": 1.6647683382034302, | |
| "learning_rate": 3.224464601949349e-05, | |
| "loss": 14.3627, | |
| "step": 4810 | |
| }, | |
| { | |
| "epoch": 0.482, | |
| "grad_norm": 1.6885759830474854, | |
| "learning_rate": 3.2211172901346385e-05, | |
| "loss": 14.3836, | |
| "step": 4820 | |
| }, | |
| { | |
| "epoch": 0.483, | |
| "grad_norm": 1.4781463146209717, | |
| "learning_rate": 3.21778038124124e-05, | |
| "loss": 14.2925, | |
| "step": 4830 | |
| }, | |
| { | |
| "epoch": 0.484, | |
| "grad_norm": 1.5786219835281372, | |
| "learning_rate": 3.2144538214961344e-05, | |
| "loss": 13.8596, | |
| "step": 4840 | |
| }, | |
| { | |
| "epoch": 0.485, | |
| "grad_norm": 1.4004464149475098, | |
| "learning_rate": 3.21113755751464e-05, | |
| "loss": 13.7206, | |
| "step": 4850 | |
| }, | |
| { | |
| "epoch": 0.485, | |
| "eval_loss": 3.6186439990997314, | |
| "eval_runtime": 6.2035, | |
| "eval_samples_per_second": 322.396, | |
| "eval_steps_per_second": 20.15, | |
| "step": 4850 | |
| }, | |
| { | |
| "epoch": 0.486, | |
| "grad_norm": 1.272031545639038, | |
| "learning_rate": 3.207831536296808e-05, | |
| "loss": 14.5275, | |
| "step": 4860 | |
| }, | |
| { | |
| "epoch": 0.487, | |
| "grad_norm": 1.3145509958267212, | |
| "learning_rate": 3.2045357052238676e-05, | |
| "loss": 14.3756, | |
| "step": 4870 | |
| }, | |
| { | |
| "epoch": 0.488, | |
| "grad_norm": 1.108602523803711, | |
| "learning_rate": 3.201250012054707e-05, | |
| "loss": 13.865, | |
| "step": 4880 | |
| }, | |
| { | |
| "epoch": 0.489, | |
| "grad_norm": 1.4979068040847778, | |
| "learning_rate": 3.197974404922397e-05, | |
| "loss": 14.5053, | |
| "step": 4890 | |
| }, | |
| { | |
| "epoch": 0.49, | |
| "grad_norm": 1.2225371599197388, | |
| "learning_rate": 3.194708832330752e-05, | |
| "loss": 14.6196, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 0.49, | |
| "eval_loss": 3.6117711067199707, | |
| "eval_runtime": 6.2829, | |
| "eval_samples_per_second": 318.327, | |
| "eval_steps_per_second": 19.895, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 0.491, | |
| "grad_norm": 1.3729444742202759, | |
| "learning_rate": 3.191453243150929e-05, | |
| "loss": 14.44, | |
| "step": 4910 | |
| }, | |
| { | |
| "epoch": 0.492, | |
| "grad_norm": 1.4063236713409424, | |
| "learning_rate": 3.188207586618064e-05, | |
| "loss": 14.2631, | |
| "step": 4920 | |
| }, | |
| { | |
| "epoch": 0.493, | |
| "grad_norm": 1.1830472946166992, | |
| "learning_rate": 3.1849718123279517e-05, | |
| "loss": 13.5341, | |
| "step": 4930 | |
| }, | |
| { | |
| "epoch": 0.494, | |
| "grad_norm": 1.3388463258743286, | |
| "learning_rate": 3.181745870233753e-05, | |
| "loss": 13.9621, | |
| "step": 4940 | |
| }, | |
| { | |
| "epoch": 0.495, | |
| "grad_norm": 1.2672926187515259, | |
| "learning_rate": 3.178529710642749e-05, | |
| "loss": 14.2384, | |
| "step": 4950 | |
| }, | |
| { | |
| "epoch": 0.495, | |
| "eval_loss": 3.6163549423217773, | |
| "eval_runtime": 6.2068, | |
| "eval_samples_per_second": 322.225, | |
| "eval_steps_per_second": 20.139, | |
| "step": 4950 | |
| }, | |
| { | |
| "epoch": 0.496, | |
| "grad_norm": 1.2444629669189453, | |
| "learning_rate": 3.175323284213118e-05, | |
| "loss": 14.3422, | |
| "step": 4960 | |
| }, | |
| { | |
| "epoch": 0.497, | |
| "grad_norm": 1.3806087970733643, | |
| "learning_rate": 3.172126541950766e-05, | |
| "loss": 14.1642, | |
| "step": 4970 | |
| }, | |
| { | |
| "epoch": 0.498, | |
| "grad_norm": 1.3681821823120117, | |
| "learning_rate": 3.1689394352061735e-05, | |
| "loss": 14.0664, | |
| "step": 4980 | |
| }, | |
| { | |
| "epoch": 0.499, | |
| "grad_norm": 1.6592974662780762, | |
| "learning_rate": 3.165761915671293e-05, | |
| "loss": 14.3502, | |
| "step": 4990 | |
| }, | |
| { | |
| "epoch": 0.5, | |
| "grad_norm": 1.1652058362960815, | |
| "learning_rate": 3.162593935376469e-05, | |
| "loss": 14.4776, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.5, | |
| "eval_loss": 3.6070852279663086, | |
| "eval_runtime": 6.2187, | |
| "eval_samples_per_second": 321.609, | |
| "eval_steps_per_second": 20.101, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.501, | |
| "grad_norm": 1.1766422986984253, | |
| "learning_rate": 3.159435446687396e-05, | |
| "loss": 14.1349, | |
| "step": 5010 | |
| }, | |
| { | |
| "epoch": 0.502, | |
| "grad_norm": 1.2750104665756226, | |
| "learning_rate": 3.1562864023021174e-05, | |
| "loss": 14.2648, | |
| "step": 5020 | |
| }, | |
| { | |
| "epoch": 0.503, | |
| "grad_norm": 1.1030397415161133, | |
| "learning_rate": 3.1531467552480395e-05, | |
| "loss": 14.0325, | |
| "step": 5030 | |
| }, | |
| { | |
| "epoch": 0.504, | |
| "grad_norm": 1.3132902383804321, | |
| "learning_rate": 3.1500164588789964e-05, | |
| "loss": 14.5553, | |
| "step": 5040 | |
| }, | |
| { | |
| "epoch": 0.505, | |
| "grad_norm": 1.1227859258651733, | |
| "learning_rate": 3.146895466872338e-05, | |
| "loss": 14.3816, | |
| "step": 5050 | |
| }, | |
| { | |
| "epoch": 0.505, | |
| "eval_loss": 3.6058602333068848, | |
| "eval_runtime": 6.2308, | |
| "eval_samples_per_second": 320.985, | |
| "eval_steps_per_second": 20.062, | |
| "step": 5050 | |
| }, | |
| { | |
| "epoch": 0.506, | |
| "grad_norm": 1.3989307880401611, | |
| "learning_rate": 3.1437837332260465e-05, | |
| "loss": 14.3869, | |
| "step": 5060 | |
| }, | |
| { | |
| "epoch": 0.507, | |
| "grad_norm": 1.2606079578399658, | |
| "learning_rate": 3.140681212255896e-05, | |
| "loss": 14.8284, | |
| "step": 5070 | |
| }, | |
| { | |
| "epoch": 0.508, | |
| "grad_norm": 1.5170297622680664, | |
| "learning_rate": 3.137587858592628e-05, | |
| "loss": 14.2912, | |
| "step": 5080 | |
| }, | |
| { | |
| "epoch": 0.509, | |
| "grad_norm": 1.1421058177947998, | |
| "learning_rate": 3.13450362717917e-05, | |
| "loss": 14.3119, | |
| "step": 5090 | |
| }, | |
| { | |
| "epoch": 0.51, | |
| "grad_norm": 1.2525449991226196, | |
| "learning_rate": 3.131428473267876e-05, | |
| "loss": 14.4662, | |
| "step": 5100 | |
| }, | |
| { | |
| "epoch": 0.51, | |
| "eval_loss": 3.6014516353607178, | |
| "eval_runtime": 6.2101, | |
| "eval_samples_per_second": 322.058, | |
| "eval_steps_per_second": 20.129, | |
| "step": 5100 | |
| }, | |
| { | |
| "epoch": 0.511, | |
| "grad_norm": 1.1750966310501099, | |
| "learning_rate": 3.1283623524178034e-05, | |
| "loss": 14.0454, | |
| "step": 5110 | |
| }, | |
| { | |
| "epoch": 0.512, | |
| "grad_norm": 1.2625198364257812, | |
| "learning_rate": 3.125305220492011e-05, | |
| "loss": 14.3145, | |
| "step": 5120 | |
| }, | |
| { | |
| "epoch": 0.513, | |
| "grad_norm": 1.3217575550079346, | |
| "learning_rate": 3.122257033654893e-05, | |
| "loss": 14.3861, | |
| "step": 5130 | |
| }, | |
| { | |
| "epoch": 0.514, | |
| "grad_norm": 1.1578750610351562, | |
| "learning_rate": 3.119217748369538e-05, | |
| "loss": 14.7037, | |
| "step": 5140 | |
| }, | |
| { | |
| "epoch": 0.515, | |
| "grad_norm": 1.0977482795715332, | |
| "learning_rate": 3.11618732139512e-05, | |
| "loss": 14.5877, | |
| "step": 5150 | |
| }, | |
| { | |
| "epoch": 0.515, | |
| "eval_loss": 3.6061079502105713, | |
| "eval_runtime": 6.2037, | |
| "eval_samples_per_second": 322.391, | |
| "eval_steps_per_second": 20.149, | |
| "step": 5150 | |
| }, | |
| { | |
| "epoch": 0.516, | |
| "grad_norm": 1.5148530006408691, | |
| "learning_rate": 3.113165709784312e-05, | |
| "loss": 14.0579, | |
| "step": 5160 | |
| }, | |
| { | |
| "epoch": 0.517, | |
| "grad_norm": 1.2597321271896362, | |
| "learning_rate": 3.110152870880728e-05, | |
| "loss": 14.3179, | |
| "step": 5170 | |
| }, | |
| { | |
| "epoch": 0.518, | |
| "grad_norm": 1.0222440958023071, | |
| "learning_rate": 3.107148762316397e-05, | |
| "loss": 14.5666, | |
| "step": 5180 | |
| }, | |
| { | |
| "epoch": 0.519, | |
| "grad_norm": 1.2599363327026367, | |
| "learning_rate": 3.104153342009261e-05, | |
| "loss": 14.1644, | |
| "step": 5190 | |
| }, | |
| { | |
| "epoch": 0.52, | |
| "grad_norm": 1.148889422416687, | |
| "learning_rate": 3.101166568160696e-05, | |
| "loss": 13.9043, | |
| "step": 5200 | |
| }, | |
| { | |
| "epoch": 0.52, | |
| "eval_loss": 3.596761703491211, | |
| "eval_runtime": 6.2279, | |
| "eval_samples_per_second": 321.133, | |
| "eval_steps_per_second": 20.071, | |
| "step": 5200 | |
| }, | |
| { | |
| "epoch": 0.521, | |
| "grad_norm": 1.254384160041809, | |
| "learning_rate": 3.098188399253065e-05, | |
| "loss": 14.405, | |
| "step": 5210 | |
| }, | |
| { | |
| "epoch": 0.522, | |
| "grad_norm": 1.6923539638519287, | |
| "learning_rate": 3.0952187940472915e-05, | |
| "loss": 14.4278, | |
| "step": 5220 | |
| }, | |
| { | |
| "epoch": 0.523, | |
| "grad_norm": 1.6045749187469482, | |
| "learning_rate": 3.092257711580463e-05, | |
| "loss": 14.2743, | |
| "step": 5230 | |
| }, | |
| { | |
| "epoch": 0.524, | |
| "grad_norm": 1.2014061212539673, | |
| "learning_rate": 3.089305111163457e-05, | |
| "loss": 14.4292, | |
| "step": 5240 | |
| }, | |
| { | |
| "epoch": 0.525, | |
| "grad_norm": 1.3017672300338745, | |
| "learning_rate": 3.0863609523785955e-05, | |
| "loss": 14.5071, | |
| "step": 5250 | |
| }, | |
| { | |
| "epoch": 0.525, | |
| "eval_loss": 3.590036153793335, | |
| "eval_runtime": 6.1954, | |
| "eval_samples_per_second": 322.822, | |
| "eval_steps_per_second": 20.176, | |
| "step": 5250 | |
| }, | |
| { | |
| "epoch": 0.526, | |
| "grad_norm": 1.3010127544403076, | |
| "learning_rate": 3.083425195077315e-05, | |
| "loss": 14.6025, | |
| "step": 5260 | |
| }, | |
| { | |
| "epoch": 0.527, | |
| "grad_norm": 1.4094475507736206, | |
| "learning_rate": 3.080497799377872e-05, | |
| "loss": 14.0731, | |
| "step": 5270 | |
| }, | |
| { | |
| "epoch": 0.528, | |
| "grad_norm": 1.1461422443389893, | |
| "learning_rate": 3.077578725663068e-05, | |
| "loss": 13.7408, | |
| "step": 5280 | |
| }, | |
| { | |
| "epoch": 0.529, | |
| "grad_norm": 1.1306099891662598, | |
| "learning_rate": 3.0746679345779934e-05, | |
| "loss": 13.4037, | |
| "step": 5290 | |
| }, | |
| { | |
| "epoch": 0.53, | |
| "grad_norm": 1.536644697189331, | |
| "learning_rate": 3.071765387027802e-05, | |
| "loss": 13.4816, | |
| "step": 5300 | |
| }, | |
| { | |
| "epoch": 0.53, | |
| "eval_loss": 3.6046133041381836, | |
| "eval_runtime": 6.2359, | |
| "eval_samples_per_second": 320.723, | |
| "eval_steps_per_second": 20.045, | |
| "step": 5300 | |
| }, | |
| { | |
| "epoch": 0.531, | |
| "grad_norm": 1.5308995246887207, | |
| "learning_rate": 3.068871044175507e-05, | |
| "loss": 14.0583, | |
| "step": 5310 | |
| }, | |
| { | |
| "epoch": 0.532, | |
| "grad_norm": 1.1854599714279175, | |
| "learning_rate": 3.0659848674397934e-05, | |
| "loss": 13.6901, | |
| "step": 5320 | |
| }, | |
| { | |
| "epoch": 0.533, | |
| "grad_norm": 1.1849660873413086, | |
| "learning_rate": 3.063106818492863e-05, | |
| "loss": 13.6696, | |
| "step": 5330 | |
| }, | |
| { | |
| "epoch": 0.534, | |
| "grad_norm": 1.3578708171844482, | |
| "learning_rate": 3.060236859258293e-05, | |
| "loss": 13.4662, | |
| "step": 5340 | |
| }, | |
| { | |
| "epoch": 0.535, | |
| "grad_norm": 1.1757259368896484, | |
| "learning_rate": 3.057374951908926e-05, | |
| "loss": 13.7274, | |
| "step": 5350 | |
| }, | |
| { | |
| "epoch": 0.535, | |
| "eval_loss": 3.6120777130126953, | |
| "eval_runtime": 6.1981, | |
| "eval_samples_per_second": 322.682, | |
| "eval_steps_per_second": 20.168, | |
| "step": 5350 | |
| }, | |
| { | |
| "epoch": 0.536, | |
| "grad_norm": 1.3536651134490967, | |
| "learning_rate": 3.054521058864766e-05, | |
| "loss": 14.1579, | |
| "step": 5360 | |
| }, | |
| { | |
| "epoch": 0.537, | |
| "grad_norm": 1.3435899019241333, | |
| "learning_rate": 3.051675142790916e-05, | |
| "loss": 14.4204, | |
| "step": 5370 | |
| }, | |
| { | |
| "epoch": 0.538, | |
| "grad_norm": 1.1941726207733154, | |
| "learning_rate": 3.048837166595521e-05, | |
| "loss": 14.1706, | |
| "step": 5380 | |
| }, | |
| { | |
| "epoch": 0.539, | |
| "grad_norm": 1.123294472694397, | |
| "learning_rate": 3.0460070934277386e-05, | |
| "loss": 13.9552, | |
| "step": 5390 | |
| }, | |
| { | |
| "epoch": 0.54, | |
| "grad_norm": 1.2226771116256714, | |
| "learning_rate": 3.0431848866757295e-05, | |
| "loss": 14.0163, | |
| "step": 5400 | |
| }, | |
| { | |
| "epoch": 0.54, | |
| "eval_loss": 3.5943922996520996, | |
| "eval_runtime": 6.2301, | |
| "eval_samples_per_second": 321.024, | |
| "eval_steps_per_second": 20.064, | |
| "step": 5400 | |
| }, | |
| { | |
| "epoch": 0.541, | |
| "grad_norm": 1.2817046642303467, | |
| "learning_rate": 3.040370509964669e-05, | |
| "loss": 13.9803, | |
| "step": 5410 | |
| }, | |
| { | |
| "epoch": 0.542, | |
| "grad_norm": 1.2297040224075317, | |
| "learning_rate": 3.037563927154777e-05, | |
| "loss": 14.6411, | |
| "step": 5420 | |
| }, | |
| { | |
| "epoch": 0.543, | |
| "grad_norm": 1.2498054504394531, | |
| "learning_rate": 3.0347651023393664e-05, | |
| "loss": 14.4703, | |
| "step": 5430 | |
| }, | |
| { | |
| "epoch": 0.544, | |
| "grad_norm": 1.189957857131958, | |
| "learning_rate": 3.0319739998429204e-05, | |
| "loss": 13.9314, | |
| "step": 5440 | |
| }, | |
| { | |
| "epoch": 0.545, | |
| "grad_norm": 1.176761507987976, | |
| "learning_rate": 3.0291905842191765e-05, | |
| "loss": 13.9819, | |
| "step": 5450 | |
| }, | |
| { | |
| "epoch": 0.545, | |
| "eval_loss": 3.5868208408355713, | |
| "eval_runtime": 6.2396, | |
| "eval_samples_per_second": 320.535, | |
| "eval_steps_per_second": 20.033, | |
| "step": 5450 | |
| }, | |
| { | |
| "epoch": 0.546, | |
| "grad_norm": 1.2379381656646729, | |
| "learning_rate": 3.0264148202492366e-05, | |
| "loss": 14.4884, | |
| "step": 5460 | |
| }, | |
| { | |
| "epoch": 0.547, | |
| "grad_norm": 1.1885240077972412, | |
| "learning_rate": 3.0236466729396972e-05, | |
| "loss": 13.8977, | |
| "step": 5470 | |
| }, | |
| { | |
| "epoch": 0.548, | |
| "grad_norm": 1.4787578582763672, | |
| "learning_rate": 3.020886107520798e-05, | |
| "loss": 14.1968, | |
| "step": 5480 | |
| }, | |
| { | |
| "epoch": 0.549, | |
| "grad_norm": 1.121922254562378, | |
| "learning_rate": 3.0181330894445807e-05, | |
| "loss": 13.8813, | |
| "step": 5490 | |
| }, | |
| { | |
| "epoch": 0.55, | |
| "grad_norm": 1.4813346862792969, | |
| "learning_rate": 3.0153875843830832e-05, | |
| "loss": 14.2594, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 0.55, | |
| "eval_loss": 3.5890376567840576, | |
| "eval_runtime": 6.2112, | |
| "eval_samples_per_second": 322.001, | |
| "eval_steps_per_second": 20.125, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 0.551, | |
| "grad_norm": 1.2030844688415527, | |
| "learning_rate": 3.0126495582265325e-05, | |
| "loss": 14.2157, | |
| "step": 5510 | |
| }, | |
| { | |
| "epoch": 0.552, | |
| "grad_norm": 1.1894972324371338, | |
| "learning_rate": 3.0099189770815694e-05, | |
| "loss": 13.9861, | |
| "step": 5520 | |
| }, | |
| { | |
| "epoch": 0.553, | |
| "grad_norm": 1.2028629779815674, | |
| "learning_rate": 3.0071958072694866e-05, | |
| "loss": 13.931, | |
| "step": 5530 | |
| }, | |
| { | |
| "epoch": 0.554, | |
| "grad_norm": 1.2379300594329834, | |
| "learning_rate": 3.004480015324483e-05, | |
| "loss": 14.1274, | |
| "step": 5540 | |
| }, | |
| { | |
| "epoch": 0.555, | |
| "grad_norm": 1.0493221282958984, | |
| "learning_rate": 3.0017715679919344e-05, | |
| "loss": 14.3283, | |
| "step": 5550 | |
| }, | |
| { | |
| "epoch": 0.555, | |
| "eval_loss": 3.582146644592285, | |
| "eval_runtime": 6.224, | |
| "eval_samples_per_second": 321.336, | |
| "eval_steps_per_second": 20.084, | |
| "step": 5550 | |
| }, | |
| { | |
| "epoch": 0.556, | |
| "grad_norm": 1.2582511901855469, | |
| "learning_rate": 2.9990704322266887e-05, | |
| "loss": 14.4304, | |
| "step": 5560 | |
| }, | |
| { | |
| "epoch": 0.557, | |
| "grad_norm": 1.212406873703003, | |
| "learning_rate": 2.9963765751913658e-05, | |
| "loss": 14.5878, | |
| "step": 5570 | |
| }, | |
| { | |
| "epoch": 0.558, | |
| "grad_norm": 1.2840884923934937, | |
| "learning_rate": 2.9936899642546866e-05, | |
| "loss": 14.2102, | |
| "step": 5580 | |
| }, | |
| { | |
| "epoch": 0.559, | |
| "grad_norm": 1.3281713724136353, | |
| "learning_rate": 2.991010566989808e-05, | |
| "loss": 13.6666, | |
| "step": 5590 | |
| }, | |
| { | |
| "epoch": 0.56, | |
| "grad_norm": 1.5215142965316772, | |
| "learning_rate": 2.9883383511726847e-05, | |
| "loss": 13.9324, | |
| "step": 5600 | |
| }, | |
| { | |
| "epoch": 0.56, | |
| "eval_loss": 3.5781474113464355, | |
| "eval_runtime": 6.226, | |
| "eval_samples_per_second": 321.232, | |
| "eval_steps_per_second": 20.077, | |
| "step": 5600 | |
| }, | |
| { | |
| "epoch": 0.561, | |
| "grad_norm": 1.1610621213912964, | |
| "learning_rate": 2.985673284780436e-05, | |
| "loss": 14.2228, | |
| "step": 5610 | |
| }, | |
| { | |
| "epoch": 0.562, | |
| "grad_norm": 1.1308223009109497, | |
| "learning_rate": 2.9830153359897363e-05, | |
| "loss": 14.2885, | |
| "step": 5620 | |
| }, | |
| { | |
| "epoch": 0.563, | |
| "grad_norm": 1.1470520496368408, | |
| "learning_rate": 2.9803644731752194e-05, | |
| "loss": 14.5404, | |
| "step": 5630 | |
| }, | |
| { | |
| "epoch": 0.564, | |
| "grad_norm": 1.3501935005187988, | |
| "learning_rate": 2.9777206649078993e-05, | |
| "loss": 14.237, | |
| "step": 5640 | |
| }, | |
| { | |
| "epoch": 0.565, | |
| "grad_norm": 1.115102767944336, | |
| "learning_rate": 2.9750838799536022e-05, | |
| "loss": 13.7697, | |
| "step": 5650 | |
| }, | |
| { | |
| "epoch": 0.565, | |
| "eval_loss": 3.5766561031341553, | |
| "eval_runtime": 6.2493, | |
| "eval_samples_per_second": 320.035, | |
| "eval_steps_per_second": 20.002, | |
| "step": 5650 | |
| }, | |
| { | |
| "epoch": 0.566, | |
| "grad_norm": 0.9969695210456848, | |
| "learning_rate": 2.9724540872714186e-05, | |
| "loss": 14.0174, | |
| "step": 5660 | |
| }, | |
| { | |
| "epoch": 0.567, | |
| "grad_norm": 1.4996564388275146, | |
| "learning_rate": 2.969831256012167e-05, | |
| "loss": 14.4387, | |
| "step": 5670 | |
| }, | |
| { | |
| "epoch": 0.568, | |
| "grad_norm": 1.0316441059112549, | |
| "learning_rate": 2.9672153555168763e-05, | |
| "loss": 14.0463, | |
| "step": 5680 | |
| }, | |
| { | |
| "epoch": 0.569, | |
| "grad_norm": 1.5262383222579956, | |
| "learning_rate": 2.9646063553152792e-05, | |
| "loss": 14.1116, | |
| "step": 5690 | |
| }, | |
| { | |
| "epoch": 0.57, | |
| "grad_norm": 1.442413568496704, | |
| "learning_rate": 2.962004225124321e-05, | |
| "loss": 13.7311, | |
| "step": 5700 | |
| }, | |
| { | |
| "epoch": 0.57, | |
| "eval_loss": 3.5711216926574707, | |
| "eval_runtime": 6.2251, | |
| "eval_samples_per_second": 321.28, | |
| "eval_steps_per_second": 20.08, | |
| "step": 5700 | |
| }, | |
| { | |
| "epoch": 0.571, | |
| "grad_norm": 1.0696613788604736, | |
| "learning_rate": 2.9594089348466825e-05, | |
| "loss": 13.9055, | |
| "step": 5710 | |
| }, | |
| { | |
| "epoch": 0.572, | |
| "grad_norm": 1.1490660905838013, | |
| "learning_rate": 2.9568204545693235e-05, | |
| "loss": 13.5724, | |
| "step": 5720 | |
| }, | |
| { | |
| "epoch": 0.573, | |
| "grad_norm": 1.0024884939193726, | |
| "learning_rate": 2.9542387545620252e-05, | |
| "loss": 13.8718, | |
| "step": 5730 | |
| }, | |
| { | |
| "epoch": 0.574, | |
| "grad_norm": 1.1411281824111938, | |
| "learning_rate": 2.9516638052759665e-05, | |
| "loss": 13.9561, | |
| "step": 5740 | |
| }, | |
| { | |
| "epoch": 0.575, | |
| "grad_norm": 1.3843379020690918, | |
| "learning_rate": 2.9490955773422944e-05, | |
| "loss": 13.747, | |
| "step": 5750 | |
| }, | |
| { | |
| "epoch": 0.575, | |
| "eval_loss": 3.592867136001587, | |
| "eval_runtime": 6.2333, | |
| "eval_samples_per_second": 320.858, | |
| "eval_steps_per_second": 20.054, | |
| "step": 5750 | |
| }, | |
| { | |
| "epoch": 0.576, | |
| "grad_norm": 1.0970510244369507, | |
| "learning_rate": 2.946534041570722e-05, | |
| "loss": 14.139, | |
| "step": 5760 | |
| }, | |
| { | |
| "epoch": 0.577, | |
| "grad_norm": 1.0204675197601318, | |
| "learning_rate": 2.9439791689481354e-05, | |
| "loss": 13.7387, | |
| "step": 5770 | |
| }, | |
| { | |
| "epoch": 0.578, | |
| "grad_norm": 1.233353853225708, | |
| "learning_rate": 2.9414309306372117e-05, | |
| "loss": 14.0551, | |
| "step": 5780 | |
| }, | |
| { | |
| "epoch": 0.579, | |
| "grad_norm": 1.2701174020767212, | |
| "learning_rate": 2.938889297975051e-05, | |
| "loss": 14.2149, | |
| "step": 5790 | |
| }, | |
| { | |
| "epoch": 0.58, | |
| "grad_norm": 1.036139726638794, | |
| "learning_rate": 2.936354242471826e-05, | |
| "loss": 13.816, | |
| "step": 5800 | |
| }, | |
| { | |
| "epoch": 0.58, | |
| "eval_loss": 3.5758726596832275, | |
| "eval_runtime": 6.2078, | |
| "eval_samples_per_second": 322.177, | |
| "eval_steps_per_second": 20.136, | |
| "step": 5800 | |
| }, | |
| { | |
| "epoch": 0.581, | |
| "grad_norm": 1.3539680242538452, | |
| "learning_rate": 2.933825735809436e-05, | |
| "loss": 13.9908, | |
| "step": 5810 | |
| }, | |
| { | |
| "epoch": 0.582, | |
| "grad_norm": 1.2056362628936768, | |
| "learning_rate": 2.931303749840183e-05, | |
| "loss": 14.2202, | |
| "step": 5820 | |
| }, | |
| { | |
| "epoch": 0.583, | |
| "grad_norm": 1.1992346048355103, | |
| "learning_rate": 2.9287882565854518e-05, | |
| "loss": 14.2938, | |
| "step": 5830 | |
| }, | |
| { | |
| "epoch": 0.584, | |
| "grad_norm": 1.378831148147583, | |
| "learning_rate": 2.9262792282344083e-05, | |
| "loss": 13.963, | |
| "step": 5840 | |
| }, | |
| { | |
| "epoch": 0.585, | |
| "grad_norm": 1.3450469970703125, | |
| "learning_rate": 2.9237766371427078e-05, | |
| "loss": 14.0479, | |
| "step": 5850 | |
| }, | |
| { | |
| "epoch": 0.585, | |
| "eval_loss": 3.5703623294830322, | |
| "eval_runtime": 6.195, | |
| "eval_samples_per_second": 322.841, | |
| "eval_steps_per_second": 20.178, | |
| "step": 5850 | |
| }, | |
| { | |
| "epoch": 0.586, | |
| "grad_norm": 1.115608811378479, | |
| "learning_rate": 2.9212804558312162e-05, | |
| "loss": 14.1431, | |
| "step": 5860 | |
| }, | |
| { | |
| "epoch": 0.587, | |
| "grad_norm": 1.0845974683761597, | |
| "learning_rate": 2.918790656984741e-05, | |
| "loss": 14.2136, | |
| "step": 5870 | |
| }, | |
| { | |
| "epoch": 0.588, | |
| "grad_norm": 1.293483018875122, | |
| "learning_rate": 2.9163072134507774e-05, | |
| "loss": 14.0732, | |
| "step": 5880 | |
| }, | |
| { | |
| "epoch": 0.589, | |
| "grad_norm": 1.5905728340148926, | |
| "learning_rate": 2.9138300982382654e-05, | |
| "loss": 14.1266, | |
| "step": 5890 | |
| }, | |
| { | |
| "epoch": 0.59, | |
| "grad_norm": 1.4333081245422363, | |
| "learning_rate": 2.9113592845163552e-05, | |
| "loss": 14.3786, | |
| "step": 5900 | |
| }, | |
| { | |
| "epoch": 0.59, | |
| "eval_loss": 3.5610904693603516, | |
| "eval_runtime": 6.2518, | |
| "eval_samples_per_second": 319.906, | |
| "eval_steps_per_second": 19.994, | |
| "step": 5900 | |
| }, | |
| { | |
| "epoch": 0.591, | |
| "grad_norm": 1.3458027839660645, | |
| "learning_rate": 2.9088947456131888e-05, | |
| "loss": 14.4405, | |
| "step": 5910 | |
| }, | |
| { | |
| "epoch": 0.592, | |
| "grad_norm": 1.0692472457885742, | |
| "learning_rate": 2.906436455014689e-05, | |
| "loss": 14.2587, | |
| "step": 5920 | |
| }, | |
| { | |
| "epoch": 0.593, | |
| "grad_norm": 1.1816102266311646, | |
| "learning_rate": 2.903984386363363e-05, | |
| "loss": 14.0342, | |
| "step": 5930 | |
| }, | |
| { | |
| "epoch": 0.594, | |
| "grad_norm": 1.4598188400268555, | |
| "learning_rate": 2.901538513457116e-05, | |
| "loss": 14.1905, | |
| "step": 5940 | |
| }, | |
| { | |
| "epoch": 0.595, | |
| "grad_norm": 0.9857292771339417, | |
| "learning_rate": 2.8990988102480705e-05, | |
| "loss": 13.8953, | |
| "step": 5950 | |
| }, | |
| { | |
| "epoch": 0.595, | |
| "eval_loss": 3.569197654724121, | |
| "eval_runtime": 6.2134, | |
| "eval_samples_per_second": 321.884, | |
| "eval_steps_per_second": 20.118, | |
| "step": 5950 | |
| }, | |
| { | |
| "epoch": 0.596, | |
| "grad_norm": 1.5652998685836792, | |
| "learning_rate": 2.8966652508414088e-05, | |
| "loss": 14.1159, | |
| "step": 5960 | |
| }, | |
| { | |
| "epoch": 0.597, | |
| "grad_norm": 1.135145664215088, | |
| "learning_rate": 2.8942378094942106e-05, | |
| "loss": 14.1481, | |
| "step": 5970 | |
| }, | |
| { | |
| "epoch": 0.598, | |
| "grad_norm": 1.1402225494384766, | |
| "learning_rate": 2.8918164606143166e-05, | |
| "loss": 14.3713, | |
| "step": 5980 | |
| }, | |
| { | |
| "epoch": 0.599, | |
| "grad_norm": 1.2710378170013428, | |
| "learning_rate": 2.8894011787591897e-05, | |
| "loss": 14.1149, | |
| "step": 5990 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "grad_norm": 1.2332837581634521, | |
| "learning_rate": 2.8869919386347948e-05, | |
| "loss": 14.2201, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "eval_loss": 3.5695314407348633, | |
| "eval_runtime": 6.2251, | |
| "eval_samples_per_second": 321.282, | |
| "eval_steps_per_second": 20.08, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 0.601, | |
| "grad_norm": 1.117622971534729, | |
| "learning_rate": 2.884588715094486e-05, | |
| "loss": 14.3036, | |
| "step": 6010 | |
| }, | |
| { | |
| "epoch": 0.602, | |
| "grad_norm": 1.1040174961090088, | |
| "learning_rate": 2.8821914831379037e-05, | |
| "loss": 13.8687, | |
| "step": 6020 | |
| }, | |
| { | |
| "epoch": 0.603, | |
| "grad_norm": 1.7144644260406494, | |
| "learning_rate": 2.879800217909883e-05, | |
| "loss": 13.7791, | |
| "step": 6030 | |
| }, | |
| { | |
| "epoch": 0.604, | |
| "grad_norm": 1.907306432723999, | |
| "learning_rate": 2.87741489469937e-05, | |
| "loss": 13.8325, | |
| "step": 6040 | |
| }, | |
| { | |
| "epoch": 0.605, | |
| "grad_norm": 1.427022099494934, | |
| "learning_rate": 2.8750354889383513e-05, | |
| "loss": 14.1623, | |
| "step": 6050 | |
| }, | |
| { | |
| "epoch": 0.605, | |
| "eval_loss": 3.5647103786468506, | |
| "eval_runtime": 6.1899, | |
| "eval_samples_per_second": 323.108, | |
| "eval_steps_per_second": 20.194, | |
| "step": 6050 | |
| }, | |
| { | |
| "epoch": 0.606, | |
| "grad_norm": 1.3600339889526367, | |
| "learning_rate": 2.872661976200789e-05, | |
| "loss": 13.9789, | |
| "step": 6060 | |
| }, | |
| { | |
| "epoch": 0.607, | |
| "grad_norm": 1.3188380002975464, | |
| "learning_rate": 2.8702943322015686e-05, | |
| "loss": 13.7975, | |
| "step": 6070 | |
| }, | |
| { | |
| "epoch": 0.608, | |
| "grad_norm": 1.5352420806884766, | |
| "learning_rate": 2.867932532795456e-05, | |
| "loss": 14.3163, | |
| "step": 6080 | |
| }, | |
| { | |
| "epoch": 0.609, | |
| "grad_norm": 1.3321303129196167, | |
| "learning_rate": 2.865576553976062e-05, | |
| "loss": 14.1487, | |
| "step": 6090 | |
| }, | |
| { | |
| "epoch": 0.61, | |
| "grad_norm": 1.168168067932129, | |
| "learning_rate": 2.8632263718748197e-05, | |
| "loss": 14.033, | |
| "step": 6100 | |
| }, | |
| { | |
| "epoch": 0.61, | |
| "eval_loss": 3.5599112510681152, | |
| "eval_runtime": 6.2025, | |
| "eval_samples_per_second": 322.449, | |
| "eval_steps_per_second": 20.153, | |
| "step": 6100 | |
| }, | |
| { | |
| "epoch": 0.611, | |
| "grad_norm": 1.336086630821228, | |
| "learning_rate": 2.8608819627599655e-05, | |
| "loss": 13.561, | |
| "step": 6110 | |
| }, | |
| { | |
| "epoch": 0.612, | |
| "grad_norm": 1.248259425163269, | |
| "learning_rate": 2.8585433030355346e-05, | |
| "loss": 13.796, | |
| "step": 6120 | |
| }, | |
| { | |
| "epoch": 0.613, | |
| "grad_norm": 1.2325026988983154, | |
| "learning_rate": 2.856210369240364e-05, | |
| "loss": 13.5651, | |
| "step": 6130 | |
| }, | |
| { | |
| "epoch": 0.614, | |
| "grad_norm": 1.2659862041473389, | |
| "learning_rate": 2.8538831380471032e-05, | |
| "loss": 14.1842, | |
| "step": 6140 | |
| }, | |
| { | |
| "epoch": 0.615, | |
| "grad_norm": 1.4123190641403198, | |
| "learning_rate": 2.851561586261235e-05, | |
| "loss": 14.1946, | |
| "step": 6150 | |
| }, | |
| { | |
| "epoch": 0.615, | |
| "eval_loss": 3.5558834075927734, | |
| "eval_runtime": 6.1869, | |
| "eval_samples_per_second": 323.262, | |
| "eval_steps_per_second": 20.204, | |
| "step": 6150 | |
| }, | |
| { | |
| "epoch": 0.616, | |
| "grad_norm": 1.2844250202178955, | |
| "learning_rate": 2.8492456908201033e-05, | |
| "loss": 13.8504, | |
| "step": 6160 | |
| }, | |
| { | |
| "epoch": 0.617, | |
| "grad_norm": 1.3036625385284424, | |
| "learning_rate": 2.8469354287919516e-05, | |
| "loss": 13.455, | |
| "step": 6170 | |
| }, | |
| { | |
| "epoch": 0.618, | |
| "grad_norm": 1.658996343612671, | |
| "learning_rate": 2.8446307773749708e-05, | |
| "loss": 13.9047, | |
| "step": 6180 | |
| }, | |
| { | |
| "epoch": 0.619, | |
| "grad_norm": 1.3913613557815552, | |
| "learning_rate": 2.8423317138963523e-05, | |
| "loss": 14.359, | |
| "step": 6190 | |
| }, | |
| { | |
| "epoch": 0.62, | |
| "grad_norm": 1.2245821952819824, | |
| "learning_rate": 2.8400382158113496e-05, | |
| "loss": 13.9187, | |
| "step": 6200 | |
| }, | |
| { | |
| "epoch": 0.62, | |
| "eval_loss": 3.5561468601226807, | |
| "eval_runtime": 6.1814, | |
| "eval_samples_per_second": 323.553, | |
| "eval_steps_per_second": 20.222, | |
| "step": 6200 | |
| }, | |
| { | |
| "epoch": 0.621, | |
| "grad_norm": 1.4613145589828491, | |
| "learning_rate": 2.8377502607023556e-05, | |
| "loss": 13.7003, | |
| "step": 6210 | |
| }, | |
| { | |
| "epoch": 0.622, | |
| "grad_norm": 1.1367855072021484, | |
| "learning_rate": 2.8354678262779732e-05, | |
| "loss": 14.1309, | |
| "step": 6220 | |
| }, | |
| { | |
| "epoch": 0.623, | |
| "grad_norm": 1.282486081123352, | |
| "learning_rate": 2.8331908903721126e-05, | |
| "loss": 14.1894, | |
| "step": 6230 | |
| }, | |
| { | |
| "epoch": 0.624, | |
| "grad_norm": 1.3200207948684692, | |
| "learning_rate": 2.830919430943082e-05, | |
| "loss": 13.954, | |
| "step": 6240 | |
| }, | |
| { | |
| "epoch": 0.625, | |
| "grad_norm": 1.4164808988571167, | |
| "learning_rate": 2.828653426072691e-05, | |
| "loss": 14.2861, | |
| "step": 6250 | |
| }, | |
| { | |
| "epoch": 0.625, | |
| "eval_loss": 3.5531787872314453, | |
| "eval_runtime": 6.1823, | |
| "eval_samples_per_second": 323.506, | |
| "eval_steps_per_second": 20.219, | |
| "step": 6250 | |
| }, | |
| { | |
| "epoch": 0.626, | |
| "grad_norm": 1.3889133930206299, | |
| "learning_rate": 2.8263928539653644e-05, | |
| "loss": 14.2126, | |
| "step": 6260 | |
| }, | |
| { | |
| "epoch": 0.627, | |
| "grad_norm": 1.80168879032135, | |
| "learning_rate": 2.824137692947261e-05, | |
| "loss": 14.0486, | |
| "step": 6270 | |
| }, | |
| { | |
| "epoch": 0.628, | |
| "grad_norm": 1.1456998586654663, | |
| "learning_rate": 2.8218879214653993e-05, | |
| "loss": 13.9792, | |
| "step": 6280 | |
| }, | |
| { | |
| "epoch": 0.629, | |
| "grad_norm": 1.2505043745040894, | |
| "learning_rate": 2.8196435180867964e-05, | |
| "loss": 13.6514, | |
| "step": 6290 | |
| }, | |
| { | |
| "epoch": 0.63, | |
| "grad_norm": 1.216320276260376, | |
| "learning_rate": 2.817404461497606e-05, | |
| "loss": 14.249, | |
| "step": 6300 | |
| }, | |
| { | |
| "epoch": 0.63, | |
| "eval_loss": 3.5436816215515137, | |
| "eval_runtime": 6.218, | |
| "eval_samples_per_second": 321.648, | |
| "eval_steps_per_second": 20.103, | |
| "step": 6300 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 10000, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 9223372036854775807, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "EarlyStoppingCallback": { | |
| "args": { | |
| "early_stopping_patience": 5, | |
| "early_stopping_threshold": 0.0 | |
| }, | |
| "attributes": { | |
| "early_stopping_patience_counter": 0 | |
| } | |
| }, | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 5.3989320935079936e+17, | |
| "train_batch_size": 32, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |