{
  "best_global_step": 625,
  "best_metric": 1.0413788557052612,
  "best_model_checkpoint": ".../training_output/checkpoint-600",
  "epoch": 10.0,
  "eval_steps": 25,
  "global_step": 640,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15625,
      "grad_norm": 5.998435020446777,
      "learning_rate": 1.40625e-06,
      "loss": 4.4544,
      "step": 10
    },
    {
      "epoch": 0.3125,
      "grad_norm": 2.767270088195801,
      "learning_rate": 2.96875e-06,
      "loss": 4.2972,
      "step": 20
    },
    {
      "epoch": 0.390625,
      "eval_loss": 2.121493101119995,
      "eval_runtime": 4.6524,
      "eval_samples_per_second": 92.855,
      "eval_steps_per_second": 5.803,
      "step": 25
    },
    {
      "epoch": 0.46875,
      "grad_norm": 1.8981692790985107,
      "learning_rate": 4.53125e-06,
      "loss": 4.2058,
      "step": 30
    },
    {
      "epoch": 0.625,
      "grad_norm": 1.126503348350525,
      "learning_rate": 6.093750000000001e-06,
      "loss": 4.1676,
      "step": 40
    },
    {
      "epoch": 0.78125,
      "grad_norm": 1.022411823272705,
      "learning_rate": 7.656250000000001e-06,
      "loss": 4.1589,
      "step": 50
    },
    {
      "epoch": 0.78125,
      "eval_loss": 2.0753986835479736,
      "eval_runtime": 4.6234,
      "eval_samples_per_second": 93.439,
      "eval_steps_per_second": 5.84,
      "step": 50
    },
    {
      "epoch": 0.9375,
      "grad_norm": 1.0090315341949463,
      "learning_rate": 9.21875e-06,
      "loss": 4.1537,
      "step": 60
    },
    {
      "epoch": 1.09375,
      "grad_norm": 1.3605090379714966,
      "learning_rate": 9.913194444444446e-06,
      "loss": 4.1414,
      "step": 70
    },
    {
      "epoch": 1.171875,
      "eval_loss": 2.052276372909546,
      "eval_runtime": 4.9117,
      "eval_samples_per_second": 87.953,
      "eval_steps_per_second": 5.497,
      "step": 75
    },
    {
      "epoch": 1.25,
      "grad_norm": 2.2447760105133057,
      "learning_rate": 9.739583333333334e-06,
      "loss": 4.1134,
      "step": 80
    },
    {
      "epoch": 1.40625,
      "grad_norm": 3.2149338722229004,
      "learning_rate": 9.565972222222222e-06,
      "loss": 4.0985,
      "step": 90
    },
    {
      "epoch": 1.5625,
      "grad_norm": 4.0335187911987305,
      "learning_rate": 9.392361111111112e-06,
      "loss": 4.0703,
      "step": 100
    },
    {
      "epoch": 1.5625,
      "eval_loss": 2.0017964839935303,
      "eval_runtime": 4.8851,
      "eval_samples_per_second": 88.433,
      "eval_steps_per_second": 5.527,
      "step": 100
    },
    {
      "epoch": 1.71875,
      "grad_norm": 4.933378219604492,
      "learning_rate": 9.21875e-06,
      "loss": 4.0612,
      "step": 110
    },
    {
      "epoch": 1.875,
      "grad_norm": 5.589846134185791,
      "learning_rate": 9.045138888888889e-06,
      "loss": 4.0258,
      "step": 120
    },
    {
      "epoch": 1.953125,
      "eval_loss": 1.940491795539856,
      "eval_runtime": 4.949,
      "eval_samples_per_second": 87.29,
      "eval_steps_per_second": 5.456,
      "step": 125
    },
    {
      "epoch": 2.03125,
      "grad_norm": 6.472458839416504,
      "learning_rate": 8.871527777777779e-06,
      "loss": 3.9457,
      "step": 130
    },
    {
      "epoch": 2.1875,
      "grad_norm": 8.088825225830078,
      "learning_rate": 8.697916666666667e-06,
      "loss": 3.8651,
      "step": 140
    },
    {
      "epoch": 2.34375,
      "grad_norm": 10.9951171875,
      "learning_rate": 8.524305555555557e-06,
      "loss": 3.7917,
      "step": 150
    },
    {
      "epoch": 2.34375,
      "eval_loss": 1.874100685119629,
      "eval_runtime": 4.6019,
      "eval_samples_per_second": 93.874,
      "eval_steps_per_second": 5.867,
      "step": 150
    },
    {
      "epoch": 2.5,
      "grad_norm": 12.075238227844238,
      "learning_rate": 8.350694444444445e-06,
      "loss": 3.6884,
      "step": 160
    },
    {
      "epoch": 2.65625,
      "grad_norm": 13.287556648254395,
      "learning_rate": 8.177083333333335e-06,
      "loss": 3.6679,
      "step": 170
    },
    {
      "epoch": 2.734375,
      "eval_loss": 1.779481291770935,
      "eval_runtime": 5.5138,
      "eval_samples_per_second": 78.348,
      "eval_steps_per_second": 4.897,
      "step": 175
    },
    {
      "epoch": 2.8125,
      "grad_norm": 13.939103126525879,
      "learning_rate": 8.003472222222223e-06,
      "loss": 3.6507,
      "step": 180
    },
    {
      "epoch": 2.96875,
      "grad_norm": 14.256585121154785,
      "learning_rate": 7.829861111111112e-06,
      "loss": 3.5815,
      "step": 190
    },
    {
      "epoch": 3.125,
      "grad_norm": 16.98706817626953,
      "learning_rate": 7.656250000000001e-06,
      "loss": 3.3862,
      "step": 200
    },
    {
      "epoch": 3.125,
      "eval_loss": 1.6903235912322998,
      "eval_runtime": 4.8218,
      "eval_samples_per_second": 89.592,
      "eval_steps_per_second": 5.6,
      "step": 200
    },
    {
      "epoch": 3.28125,
      "grad_norm": 19.495948791503906,
      "learning_rate": 7.482638888888889e-06,
      "loss": 3.3398,
      "step": 210
    },
    {
      "epoch": 3.4375,
      "grad_norm": 18.759845733642578,
      "learning_rate": 7.309027777777779e-06,
      "loss": 3.23,
      "step": 220
    },
    {
      "epoch": 3.515625,
      "eval_loss": 1.664551854133606,
      "eval_runtime": 5.0578,
      "eval_samples_per_second": 85.413,
      "eval_steps_per_second": 5.338,
      "step": 225
    },
    {
      "epoch": 3.59375,
      "grad_norm": 19.621652603149414,
      "learning_rate": 7.135416666666667e-06,
      "loss": 3.2616,
      "step": 230
    },
    {
      "epoch": 3.75,
      "grad_norm": 20.33112335205078,
      "learning_rate": 6.961805555555556e-06,
      "loss": 3.1892,
      "step": 240
    },
    {
      "epoch": 3.90625,
      "grad_norm": 21.586963653564453,
      "learning_rate": 6.788194444444444e-06,
      "loss": 3.1402,
      "step": 250
    },
    {
      "epoch": 3.90625,
      "eval_loss": 1.548005223274231,
      "eval_runtime": 4.7767,
      "eval_samples_per_second": 90.438,
      "eval_steps_per_second": 5.652,
      "step": 250
    },
    {
      "epoch": 4.0625,
      "grad_norm": 20.794466018676758,
      "learning_rate": 6.614583333333334e-06,
      "loss": 3.0188,
      "step": 260
    },
    {
      "epoch": 4.21875,
      "grad_norm": 21.908910751342773,
      "learning_rate": 6.4409722222222226e-06,
      "loss": 2.8433,
      "step": 270
    },
    {
      "epoch": 4.296875,
      "eval_loss": 1.49290931224823,
      "eval_runtime": 4.6928,
      "eval_samples_per_second": 92.055,
      "eval_steps_per_second": 5.753,
      "step": 275
    },
    {
      "epoch": 4.375,
      "grad_norm": 24.68117332458496,
      "learning_rate": 6.2673611111111125e-06,
      "loss": 2.8547,
      "step": 280
    },
    {
      "epoch": 4.53125,
      "grad_norm": 24.355791091918945,
      "learning_rate": 6.093750000000001e-06,
      "loss": 2.8257,
      "step": 290
    },
    {
      "epoch": 4.6875,
      "grad_norm": 23.48269271850586,
      "learning_rate": 5.920138888888889e-06,
      "loss": 2.7926,
      "step": 300
    },
    {
      "epoch": 4.6875,
      "eval_loss": 1.4208338260650635,
      "eval_runtime": 4.6392,
      "eval_samples_per_second": 93.12,
      "eval_steps_per_second": 5.82,
      "step": 300
    },
    {
      "epoch": 4.84375,
      "grad_norm": 25.30280303955078,
      "learning_rate": 5.746527777777778e-06,
      "loss": 2.8348,
      "step": 310
    },
    {
      "epoch": 5.0,
      "grad_norm": 26.121788024902344,
      "learning_rate": 5.572916666666667e-06,
      "loss": 2.7752,
      "step": 320
    },
    {
      "epoch": 5.078125,
      "eval_loss": 1.3631044626235962,
      "eval_runtime": 4.6263,
      "eval_samples_per_second": 93.379,
      "eval_steps_per_second": 5.836,
      "step": 325
    },
    {
      "epoch": 5.15625,
      "grad_norm": 23.74436378479004,
      "learning_rate": 5.399305555555556e-06,
      "loss": 2.5492,
      "step": 330
    },
    {
      "epoch": 5.3125,
      "grad_norm": 25.19687843322754,
      "learning_rate": 5.2256944444444445e-06,
      "loss": 2.5594,
      "step": 340
    },
    {
      "epoch": 5.46875,
      "grad_norm": 24.556848526000977,
      "learning_rate": 5.0520833333333344e-06,
      "loss": 2.5388,
      "step": 350
    },
    {
      "epoch": 5.46875,
      "eval_loss": 1.3000701665878296,
      "eval_runtime": 4.6145,
      "eval_samples_per_second": 93.618,
      "eval_steps_per_second": 5.851,
      "step": 350
    },
    {
      "epoch": 5.625,
      "grad_norm": 25.34222984313965,
      "learning_rate": 4.878472222222223e-06,
      "loss": 2.4945,
      "step": 360
    },
    {
      "epoch": 5.78125,
      "grad_norm": 26.028766632080078,
      "learning_rate": 4.704861111111112e-06,
      "loss": 2.4622,
      "step": 370
    },
    {
      "epoch": 5.859375,
      "eval_loss": 1.2737797498703003,
      "eval_runtime": 4.6455,
      "eval_samples_per_second": 92.994,
      "eval_steps_per_second": 5.812,
      "step": 375
    },
    {
      "epoch": 5.9375,
      "grad_norm": 25.235429763793945,
      "learning_rate": 4.53125e-06,
      "loss": 2.4739,
      "step": 380
    },
    {
      "epoch": 6.09375,
      "grad_norm": 24.631134033203125,
      "learning_rate": 4.357638888888889e-06,
      "loss": 2.3813,
      "step": 390
    },
    {
      "epoch": 6.25,
      "grad_norm": 27.127145767211914,
      "learning_rate": 4.184027777777778e-06,
      "loss": 2.25,
      "step": 400
    },
    {
      "epoch": 6.25,
      "eval_loss": 1.220424771308899,
      "eval_runtime": 4.705,
      "eval_samples_per_second": 91.817,
      "eval_steps_per_second": 5.739,
      "step": 400
    },
    {
      "epoch": 6.40625,
      "grad_norm": 24.72281837463379,
      "learning_rate": 4.010416666666667e-06,
      "loss": 2.3371,
      "step": 410
    },
    {
      "epoch": 6.5625,
      "grad_norm": 25.333667755126953,
      "learning_rate": 3.836805555555556e-06,
      "loss": 2.2495,
      "step": 420
    },
    {
      "epoch": 6.640625,
      "eval_loss": 1.1890788078308105,
      "eval_runtime": 4.9032,
      "eval_samples_per_second": 88.105,
      "eval_steps_per_second": 5.507,
      "step": 425
    },
    {
      "epoch": 6.71875,
      "grad_norm": 25.572025299072266,
      "learning_rate": 3.6631944444444446e-06,
      "loss": 2.2528,
      "step": 430
    },
    {
      "epoch": 6.875,
      "grad_norm": 26.69953155517578,
      "learning_rate": 3.4895833333333333e-06,
      "loss": 2.2632,
      "step": 440
    },
    {
      "epoch": 7.03125,
      "grad_norm": 25.635225296020508,
      "learning_rate": 3.3159722222222224e-06,
      "loss": 2.1912,
      "step": 450
    },
    {
      "epoch": 7.03125,
      "eval_loss": 1.143013596534729,
      "eval_runtime": 4.7149,
      "eval_samples_per_second": 91.624,
      "eval_steps_per_second": 5.727,
      "step": 450
    },
    {
      "epoch": 7.1875,
      "grad_norm": 26.367721557617188,
      "learning_rate": 3.1423611111111115e-06,
      "loss": 2.0829,
      "step": 460
    },
    {
      "epoch": 7.34375,
      "grad_norm": 26.74110984802246,
      "learning_rate": 2.96875e-06,
      "loss": 2.0772,
      "step": 470
    },
    {
      "epoch": 7.421875,
      "eval_loss": 1.130937933921814,
      "eval_runtime": 4.8054,
      "eval_samples_per_second": 89.9,
      "eval_steps_per_second": 5.619,
      "step": 475
    },
    {
      "epoch": 7.5,
      "grad_norm": 25.550865173339844,
      "learning_rate": 2.7951388888888893e-06,
      "loss": 2.0683,
      "step": 480
    },
    {
      "epoch": 7.65625,
      "grad_norm": 26.189870834350586,
      "learning_rate": 2.621527777777778e-06,
      "loss": 2.1433,
      "step": 490
    },
    {
      "epoch": 7.8125,
      "grad_norm": 25.142404556274414,
      "learning_rate": 2.4479166666666666e-06,
      "loss": 2.1272,
      "step": 500
    },
    {
      "epoch": 7.8125,
      "eval_loss": 1.1044453382492065,
      "eval_runtime": 4.7489,
      "eval_samples_per_second": 90.969,
      "eval_steps_per_second": 5.686,
      "step": 500
    },
    {
      "epoch": 7.96875,
      "grad_norm": 26.091402053833008,
      "learning_rate": 2.2743055555555557e-06,
      "loss": 2.0564,
      "step": 510
    },
    {
      "epoch": 8.125,
      "grad_norm": 25.8810977935791,
      "learning_rate": 2.1006944444444448e-06,
      "loss": 1.9743,
      "step": 520
    },
    {
      "epoch": 8.203125,
      "eval_loss": 1.0846548080444336,
      "eval_runtime": 4.9525,
      "eval_samples_per_second": 87.229,
      "eval_steps_per_second": 5.452,
      "step": 525
    },
    {
      "epoch": 8.28125,
      "grad_norm": 25.896873474121094,
      "learning_rate": 1.9270833333333334e-06,
      "loss": 1.9884,
      "step": 530
    },
    {
      "epoch": 8.4375,
      "grad_norm": 27.100982666015625,
      "learning_rate": 1.7534722222222223e-06,
      "loss": 1.9915,
      "step": 540
    },
    {
      "epoch": 8.59375,
      "grad_norm": 27.147930145263672,
      "learning_rate": 1.5798611111111112e-06,
      "loss": 2.007,
      "step": 550
    },
    {
      "epoch": 8.59375,
      "eval_loss": 1.0685715675354004,
      "eval_runtime": 4.6413,
      "eval_samples_per_second": 93.076,
      "eval_steps_per_second": 5.817,
      "step": 550
    },
    {
      "epoch": 8.75,
      "grad_norm": 26.25347137451172,
      "learning_rate": 1.40625e-06,
      "loss": 1.9727,
      "step": 560
    },
    {
      "epoch": 8.90625,
      "grad_norm": 26.688894271850586,
      "learning_rate": 1.232638888888889e-06,
      "loss": 1.9395,
      "step": 570
    },
    {
      "epoch": 8.984375,
      "eval_loss": 1.061125636100769,
      "eval_runtime": 4.6582,
      "eval_samples_per_second": 92.74,
      "eval_steps_per_second": 5.796,
      "step": 575
    },
    {
      "epoch": 9.0625,
      "grad_norm": 27.173229217529297,
      "learning_rate": 1.0590277777777778e-06,
      "loss": 1.9538,
      "step": 580
    },
    {
      "epoch": 9.21875,
      "grad_norm": 25.862943649291992,
      "learning_rate": 8.854166666666668e-07,
      "loss": 1.9069,
      "step": 590
    },
    {
      "epoch": 9.375,
      "grad_norm": 25.96297264099121,
      "learning_rate": 7.118055555555556e-07,
      "loss": 1.8768,
      "step": 600
    },
    {
      "epoch": 9.375,
      "eval_loss": 1.0467159748077393,
      "eval_runtime": 4.6503,
      "eval_samples_per_second": 92.896,
      "eval_steps_per_second": 5.806,
      "step": 600
    },
    {
      "epoch": 9.53125,
      "grad_norm": 26.35511589050293,
      "learning_rate": 5.381944444444445e-07,
      "loss": 1.8875,
      "step": 610
    },
    {
      "epoch": 9.6875,
      "grad_norm": 26.81864356994629,
      "learning_rate": 3.6458333333333337e-07,
      "loss": 1.8867,
      "step": 620
    },
    {
      "epoch": 9.765625,
      "eval_loss": 1.0413788557052612,
      "eval_runtime": 4.6336,
      "eval_samples_per_second": 93.232,
      "eval_steps_per_second": 5.827,
      "step": 625
    },
    {
      "epoch": 9.84375,
      "grad_norm": 24.75679588317871,
      "learning_rate": 1.9097222222222225e-07,
      "loss": 1.846,
      "step": 630
    },
    {
      "epoch": 10.0,
      "grad_norm": 24.3472843170166,
      "learning_rate": 1.736111111111111e-08,
      "loss": 1.9162,
      "step": 640
    }
  ],
  "logging_steps": 10,
  "max_steps": 640,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 300,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}