{
  "best_metric": 0.7391304347826086,
  "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-ve-U13-b-80b\\checkpoint-110",
  "epoch": 73.84615384615384,
  "eval_steps": 500,
  "global_step": 480,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.92,
      "eval_accuracy": 0.13043478260869565,
      "eval_loss": 1.386070728302002,
      "eval_runtime": 0.764,
      "eval_samples_per_second": 60.21,
      "eval_steps_per_second": 2.618,
      "step": 6
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 1.386,
      "step": 10
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.43478260869565216,
      "eval_loss": 1.3836690187454224,
      "eval_runtime": 0.7849,
      "eval_samples_per_second": 58.605,
      "eval_steps_per_second": 2.548,
      "step": 13
    },
    {
      "epoch": 2.92,
      "eval_accuracy": 0.30434782608695654,
      "eval_loss": 1.377610445022583,
      "eval_runtime": 0.8024,
      "eval_samples_per_second": 57.325,
      "eval_steps_per_second": 2.492,
      "step": 19
    },
    {
      "epoch": 3.08,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 1.3807,
      "step": 20
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.2391304347826087,
      "eval_loss": 1.3570277690887451,
      "eval_runtime": 0.7958,
      "eval_samples_per_second": 57.804,
      "eval_steps_per_second": 2.513,
      "step": 26
    },
    {
      "epoch": 4.62,
      "learning_rate": 3.125e-05,
      "loss": 1.3386,
      "step": 30
    },
    {
      "epoch": 4.92,
      "eval_accuracy": 0.21739130434782608,
      "eval_loss": 1.3224332332611084,
      "eval_runtime": 0.8074,
      "eval_samples_per_second": 56.972,
      "eval_steps_per_second": 2.477,
      "step": 32
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.34782608695652173,
      "eval_loss": 1.2084624767303467,
      "eval_runtime": 0.7704,
      "eval_samples_per_second": 59.709,
      "eval_steps_per_second": 2.596,
      "step": 39
    },
    {
      "epoch": 6.15,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.209,
      "step": 40
    },
    {
      "epoch": 6.92,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.1055656671524048,
      "eval_runtime": 0.743,
      "eval_samples_per_second": 61.909,
      "eval_steps_per_second": 2.692,
      "step": 45
    },
    {
      "epoch": 7.69,
      "learning_rate": 4.976851851851852e-05,
      "loss": 1.0561,
      "step": 50
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.4782608695652174,
      "eval_loss": 1.0506606101989746,
      "eval_runtime": 0.8037,
      "eval_samples_per_second": 57.235,
      "eval_steps_per_second": 2.488,
      "step": 52
    },
    {
      "epoch": 8.92,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.0161159038543701,
      "eval_runtime": 0.8335,
      "eval_samples_per_second": 55.191,
      "eval_steps_per_second": 2.4,
      "step": 58
    },
    {
      "epoch": 9.23,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 0.9157,
      "step": 60
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 0.8612546324729919,
      "eval_runtime": 0.7953,
      "eval_samples_per_second": 57.84,
      "eval_steps_per_second": 2.515,
      "step": 65
    },
    {
      "epoch": 10.77,
      "learning_rate": 4.745370370370371e-05,
      "loss": 0.8002,
      "step": 70
    },
    {
      "epoch": 10.92,
      "eval_accuracy": 0.5652173913043478,
      "eval_loss": 0.9073187112808228,
      "eval_runtime": 0.8049,
      "eval_samples_per_second": 57.148,
      "eval_steps_per_second": 2.485,
      "step": 71
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 0.8299533128738403,
      "eval_runtime": 0.7627,
      "eval_samples_per_second": 60.309,
      "eval_steps_per_second": 2.622,
      "step": 78
    },
    {
      "epoch": 12.31,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.7181,
      "step": 80
    },
    {
      "epoch": 12.92,
      "eval_accuracy": 0.5869565217391305,
      "eval_loss": 0.8958278894424438,
      "eval_runtime": 0.7599,
      "eval_samples_per_second": 60.536,
      "eval_steps_per_second": 2.632,
      "step": 84
    },
    {
      "epoch": 13.85,
      "learning_rate": 4.5138888888888894e-05,
      "loss": 0.6405,
      "step": 90
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.8074582815170288,
      "eval_runtime": 0.7518,
      "eval_samples_per_second": 61.185,
      "eval_steps_per_second": 2.66,
      "step": 91
    },
    {
      "epoch": 14.92,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 0.7477785348892212,
      "eval_runtime": 0.8063,
      "eval_samples_per_second": 57.054,
      "eval_steps_per_second": 2.481,
      "step": 97
    },
    {
      "epoch": 15.38,
      "learning_rate": 4.3981481481481486e-05,
      "loss": 0.6064,
      "step": 100
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.7370060682296753,
      "eval_runtime": 0.7799,
      "eval_samples_per_second": 58.981,
      "eval_steps_per_second": 2.564,
      "step": 104
    },
    {
      "epoch": 16.92,
      "learning_rate": 4.282407407407408e-05,
      "loss": 0.5556,
      "step": 110
    },
    {
      "epoch": 16.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.7056962847709656,
      "eval_runtime": 0.7596,
      "eval_samples_per_second": 60.555,
      "eval_steps_per_second": 2.633,
      "step": 110
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.7395027279853821,
      "eval_runtime": 0.7798,
      "eval_samples_per_second": 58.99,
      "eval_steps_per_second": 2.565,
      "step": 117
    },
    {
      "epoch": 18.46,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.4822,
      "step": 120
    },
    {
      "epoch": 18.92,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 0.8733614087104797,
      "eval_runtime": 0.776,
      "eval_samples_per_second": 59.279,
      "eval_steps_per_second": 2.577,
      "step": 123
    },
    {
      "epoch": 20.0,
      "learning_rate": 4.0509259259259265e-05,
      "loss": 0.4241,
      "step": 130
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 0.999085545539856,
      "eval_runtime": 0.7575,
      "eval_samples_per_second": 60.726,
      "eval_steps_per_second": 2.64,
      "step": 130
    },
    {
      "epoch": 20.92,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.8415805101394653,
      "eval_runtime": 0.7877,
      "eval_samples_per_second": 58.398,
      "eval_steps_per_second": 2.539,
      "step": 136
    },
    {
      "epoch": 21.54,
      "learning_rate": 3.935185185185186e-05,
      "loss": 0.4307,
      "step": 140
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 0.9194854497909546,
      "eval_runtime": 0.7525,
      "eval_samples_per_second": 61.128,
      "eval_steps_per_second": 2.658,
      "step": 143
    },
    {
      "epoch": 22.92,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.9211203455924988,
      "eval_runtime": 0.8883,
      "eval_samples_per_second": 51.787,
      "eval_steps_per_second": 2.252,
      "step": 149
    },
    {
      "epoch": 23.08,
      "learning_rate": 3.8194444444444444e-05,
      "loss": 0.381,
      "step": 150
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.6086956521739131,
      "eval_loss": 0.9683218598365784,
      "eval_runtime": 0.7845,
      "eval_samples_per_second": 58.637,
      "eval_steps_per_second": 2.549,
      "step": 156
    },
    {
      "epoch": 24.62,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.3707,
      "step": 160
    },
    {
      "epoch": 24.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.0067435503005981,
      "eval_runtime": 0.7893,
      "eval_samples_per_second": 58.283,
      "eval_steps_per_second": 2.534,
      "step": 162
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.9793215394020081,
      "eval_runtime": 0.7771,
      "eval_samples_per_second": 59.195,
      "eval_steps_per_second": 2.574,
      "step": 169
    },
    {
      "epoch": 26.15,
      "learning_rate": 3.587962962962963e-05,
      "loss": 0.3918,
      "step": 170
    },
    {
      "epoch": 26.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 0.9757705926895142,
      "eval_runtime": 0.7633,
      "eval_samples_per_second": 60.262,
      "eval_steps_per_second": 2.62,
      "step": 175
    },
    {
      "epoch": 27.69,
      "learning_rate": 3.472222222222222e-05,
      "loss": 0.3513,
      "step": 180
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 0.976148784160614,
      "eval_runtime": 0.7679,
      "eval_samples_per_second": 59.906,
      "eval_steps_per_second": 2.605,
      "step": 182
    },
    {
      "epoch": 28.92,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.074488878250122,
      "eval_runtime": 0.7646,
      "eval_samples_per_second": 60.16,
      "eval_steps_per_second": 2.616,
      "step": 188
    },
    {
      "epoch": 29.23,
      "learning_rate": 3.3564814814814815e-05,
      "loss": 0.2739,
      "step": 190
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.0775023698806763,
      "eval_runtime": 0.7662,
      "eval_samples_per_second": 60.04,
      "eval_steps_per_second": 2.61,
      "step": 195
    },
    {
      "epoch": 30.77,
      "learning_rate": 3.240740740740741e-05,
      "loss": 0.2882,
      "step": 200
    },
    {
      "epoch": 30.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.1521120071411133,
      "eval_runtime": 0.7664,
      "eval_samples_per_second": 60.024,
      "eval_steps_per_second": 2.61,
      "step": 201
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.207231879234314,
      "eval_runtime": 0.7362,
      "eval_samples_per_second": 62.485,
      "eval_steps_per_second": 2.717,
      "step": 208
    },
    {
      "epoch": 32.31,
      "learning_rate": 3.125e-05,
      "loss": 0.2588,
      "step": 210
    },
    {
      "epoch": 32.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.1374139785766602,
      "eval_runtime": 0.7423,
      "eval_samples_per_second": 61.972,
      "eval_steps_per_second": 2.694,
      "step": 214
    },
    {
      "epoch": 33.85,
      "learning_rate": 3.0092592592592593e-05,
      "loss": 0.2498,
      "step": 220
    },
    {
      "epoch": 34.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.2130507230758667,
      "eval_runtime": 0.7716,
      "eval_samples_per_second": 59.613,
      "eval_steps_per_second": 2.592,
      "step": 221
    },
    {
      "epoch": 34.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.130850911140442,
      "eval_runtime": 0.76,
      "eval_samples_per_second": 60.526,
      "eval_steps_per_second": 2.632,
      "step": 227
    },
    {
      "epoch": 35.38,
      "learning_rate": 2.8935185185185186e-05,
      "loss": 0.2584,
      "step": 230
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.2827527523040771,
      "eval_runtime": 0.773,
      "eval_samples_per_second": 59.507,
      "eval_steps_per_second": 2.587,
      "step": 234
    },
    {
      "epoch": 36.92,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.2228,
      "step": 240
    },
    {
      "epoch": 36.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.1380703449249268,
      "eval_runtime": 0.7667,
      "eval_samples_per_second": 59.997,
      "eval_steps_per_second": 2.609,
      "step": 240
    },
    {
      "epoch": 38.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.211645483970642,
      "eval_runtime": 0.7622,
      "eval_samples_per_second": 60.348,
      "eval_steps_per_second": 2.624,
      "step": 247
    },
    {
      "epoch": 38.46,
      "learning_rate": 2.6620370370370372e-05,
      "loss": 0.2408,
      "step": 250
    },
    {
      "epoch": 38.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.19622004032135,
      "eval_runtime": 0.759,
      "eval_samples_per_second": 60.603,
      "eval_steps_per_second": 2.635,
      "step": 253
    },
    {
      "epoch": 40.0,
      "learning_rate": 2.5462962962962965e-05,
      "loss": 0.2042,
      "step": 260
    },
    {
      "epoch": 40.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.2557106018066406,
      "eval_runtime": 0.736,
      "eval_samples_per_second": 62.499,
      "eval_steps_per_second": 2.717,
      "step": 260
    },
    {
      "epoch": 40.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.3511183261871338,
      "eval_runtime": 0.8239,
      "eval_samples_per_second": 55.831,
      "eval_steps_per_second": 2.427,
      "step": 266
    },
    {
      "epoch": 41.54,
      "learning_rate": 2.4305555555555558e-05,
      "loss": 0.2141,
      "step": 270
    },
    {
      "epoch": 42.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.3635742664337158,
      "eval_runtime": 0.7567,
      "eval_samples_per_second": 60.794,
      "eval_steps_per_second": 2.643,
      "step": 273
    },
    {
      "epoch": 42.92,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.308355450630188,
      "eval_runtime": 0.7361,
      "eval_samples_per_second": 62.489,
      "eval_steps_per_second": 2.717,
      "step": 279
    },
    {
      "epoch": 43.08,
      "learning_rate": 2.314814814814815e-05,
      "loss": 0.2135,
      "step": 280
    },
    {
      "epoch": 44.0,
      "eval_accuracy": 0.6086956521739131,
      "eval_loss": 1.384713053703308,
      "eval_runtime": 0.7775,
      "eval_samples_per_second": 59.161,
      "eval_steps_per_second": 2.572,
      "step": 286
    },
    {
      "epoch": 44.62,
      "learning_rate": 2.1990740740740743e-05,
      "loss": 0.191,
      "step": 290
    },
    {
      "epoch": 44.92,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.240804672241211,
      "eval_runtime": 0.7487,
      "eval_samples_per_second": 61.439,
      "eval_steps_per_second": 2.671,
      "step": 292
    },
    {
      "epoch": 46.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 1.1749811172485352,
      "eval_runtime": 0.8109,
      "eval_samples_per_second": 56.73,
      "eval_steps_per_second": 2.467,
      "step": 299
    },
    {
      "epoch": 46.15,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.1833,
      "step": 300
    },
    {
      "epoch": 46.92,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.18035089969635,
      "eval_runtime": 0.8181,
      "eval_samples_per_second": 56.228,
      "eval_steps_per_second": 2.445,
      "step": 305
    },
    {
      "epoch": 47.69,
      "learning_rate": 1.967592592592593e-05,
      "loss": 0.189,
      "step": 310
    },
    {
      "epoch": 48.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 1.1867014169692993,
      "eval_runtime": 0.7734,
      "eval_samples_per_second": 59.478,
      "eval_steps_per_second": 2.586,
      "step": 312
    },
    {
      "epoch": 48.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0623193979263306,
      "eval_runtime": 0.7663,
      "eval_samples_per_second": 60.026,
      "eval_steps_per_second": 2.61,
      "step": 318
    },
    {
      "epoch": 49.23,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.2196,
      "step": 320
    },
    {
      "epoch": 50.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.2625775337219238,
      "eval_runtime": 0.7498,
      "eval_samples_per_second": 61.348,
      "eval_steps_per_second": 2.667,
      "step": 325
    },
    {
      "epoch": 50.77,
      "learning_rate": 1.736111111111111e-05,
      "loss": 0.1505,
      "step": 330
    },
    {
      "epoch": 50.92,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.2744964361190796,
      "eval_runtime": 0.7827,
      "eval_samples_per_second": 58.771,
      "eval_steps_per_second": 2.555,
      "step": 331
    },
    {
      "epoch": 52.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.3472591638565063,
      "eval_runtime": 0.7536,
      "eval_samples_per_second": 61.04,
      "eval_steps_per_second": 2.654,
      "step": 338
    },
    {
      "epoch": 52.31,
      "learning_rate": 1.6203703703703704e-05,
      "loss": 0.1604,
      "step": 340
    },
    {
      "epoch": 52.92,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.353513240814209,
      "eval_runtime": 0.7485,
      "eval_samples_per_second": 61.457,
      "eval_steps_per_second": 2.672,
      "step": 344
    },
    {
      "epoch": 53.85,
      "learning_rate": 1.5046296296296297e-05,
      "loss": 0.1377,
      "step": 350
    },
    {
      "epoch": 54.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.3873109817504883,
      "eval_runtime": 0.7846,
      "eval_samples_per_second": 58.626,
      "eval_steps_per_second": 2.549,
      "step": 351
    },
    {
      "epoch": 54.92,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.428697943687439,
      "eval_runtime": 0.7641,
      "eval_samples_per_second": 60.202,
      "eval_steps_per_second": 2.617,
      "step": 357
    },
    {
      "epoch": 55.38,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.1752,
      "step": 360
    },
    {
      "epoch": 56.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.30143404006958,
      "eval_runtime": 0.7694,
      "eval_samples_per_second": 59.786,
      "eval_steps_per_second": 2.599,
      "step": 364
    },
    {
      "epoch": 56.92,
      "learning_rate": 1.2731481481481482e-05,
      "loss": 0.1684,
      "step": 370
    },
    {
      "epoch": 56.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.3564342260360718,
      "eval_runtime": 0.7829,
      "eval_samples_per_second": 58.754,
      "eval_steps_per_second": 2.555,
      "step": 370
    },
    {
      "epoch": 58.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.4164601564407349,
      "eval_runtime": 0.7693,
      "eval_samples_per_second": 59.794,
      "eval_steps_per_second": 2.6,
      "step": 377
    },
    {
      "epoch": 58.46,
      "learning_rate": 1.1574074074074075e-05,
      "loss": 0.1597,
      "step": 380
    },
    {
      "epoch": 58.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.362408995628357,
      "eval_runtime": 0.7996,
      "eval_samples_per_second": 57.528,
      "eval_steps_per_second": 2.501,
      "step": 383
    },
    {
      "epoch": 60.0,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 0.1393,
      "step": 390
    },
    {
      "epoch": 60.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.3018492460250854,
      "eval_runtime": 0.7661,
      "eval_samples_per_second": 60.042,
      "eval_steps_per_second": 2.611,
      "step": 390
    },
    {
      "epoch": 60.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.319698452949524,
      "eval_runtime": 0.7596,
      "eval_samples_per_second": 60.562,
      "eval_steps_per_second": 2.633,
      "step": 396
    },
    {
      "epoch": 61.54,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.1347,
      "step": 400
    },
    {
      "epoch": 62.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.3542139530181885,
      "eval_runtime": 0.7528,
      "eval_samples_per_second": 61.108,
      "eval_steps_per_second": 2.657,
      "step": 403
    },
    {
      "epoch": 62.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.3459587097167969,
      "eval_runtime": 0.7286,
      "eval_samples_per_second": 63.139,
      "eval_steps_per_second": 2.745,
      "step": 409
    },
    {
      "epoch": 63.08,
      "learning_rate": 8.101851851851852e-06,
      "loss": 0.155,
      "step": 410
    },
    {
      "epoch": 64.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.3998229503631592,
      "eval_runtime": 0.7524,
      "eval_samples_per_second": 61.141,
      "eval_steps_per_second": 2.658,
      "step": 416
    },
    {
      "epoch": 64.62,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.1198,
      "step": 420
    },
    {
      "epoch": 64.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.3982192277908325,
      "eval_runtime": 0.7529,
      "eval_samples_per_second": 61.101,
      "eval_steps_per_second": 2.657,
      "step": 422
    },
    {
      "epoch": 66.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.3988677263259888,
      "eval_runtime": 0.7526,
      "eval_samples_per_second": 61.121,
      "eval_steps_per_second": 2.657,
      "step": 429
    },
    {
      "epoch": 66.15,
      "learning_rate": 5.787037037037038e-06,
      "loss": 0.1318,
      "step": 430
    },
    {
      "epoch": 66.92,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.4035125970840454,
      "eval_runtime": 0.7663,
      "eval_samples_per_second": 60.028,
      "eval_steps_per_second": 2.61,
      "step": 435
    },
    {
      "epoch": 67.69,
      "learning_rate": 4.6296296296296296e-06,
      "loss": 0.1382,
      "step": 440
    },
    {
      "epoch": 68.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.3625603914260864,
      "eval_runtime": 0.7684,
      "eval_samples_per_second": 59.861,
      "eval_steps_per_second": 2.603,
      "step": 442
    },
    {
      "epoch": 68.92,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.371445655822754,
      "eval_runtime": 0.7666,
      "eval_samples_per_second": 60.005,
      "eval_steps_per_second": 2.609,
      "step": 448
    },
    {
      "epoch": 69.23,
      "learning_rate": 3.4722222222222224e-06,
      "loss": 0.1451,
      "step": 450
    },
    {
      "epoch": 70.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.4174377918243408,
      "eval_runtime": 0.7523,
      "eval_samples_per_second": 61.143,
      "eval_steps_per_second": 2.658,
      "step": 455
    },
    {
      "epoch": 70.77,
      "learning_rate": 2.3148148148148148e-06,
      "loss": 0.1203,
      "step": 460
    },
    {
      "epoch": 70.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.4342718124389648,
      "eval_runtime": 0.7407,
      "eval_samples_per_second": 62.103,
      "eval_steps_per_second": 2.7,
      "step": 461
    },
    {
      "epoch": 72.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.4045405387878418,
      "eval_runtime": 0.7688,
      "eval_samples_per_second": 59.83,
      "eval_steps_per_second": 2.601,
      "step": 468
    },
    {
      "epoch": 72.31,
      "learning_rate": 1.1574074074074074e-06,
      "loss": 0.141,
      "step": 470
    },
    {
      "epoch": 72.92,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.3903690576553345,
      "eval_runtime": 0.7374,
      "eval_samples_per_second": 62.385,
      "eval_steps_per_second": 2.712,
      "step": 474
    },
    {
      "epoch": 73.85,
      "learning_rate": 0.0,
      "loss": 0.1516,
      "step": 480
    },
    {
      "epoch": 73.85,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.3849464654922485,
      "eval_runtime": 0.7559,
      "eval_samples_per_second": 60.851,
      "eval_steps_per_second": 2.646,
      "step": 480
    },
    {
      "epoch": 73.85,
      "step": 480,
      "total_flos": 1.9681438145406566e+18,
      "train_loss": 0.3953114648660024,
      "train_runtime": 812.6336,
      "train_samples_per_second": 80.627,
      "train_steps_per_second": 0.591
    }
  ],
  "logging_steps": 10,
  "max_steps": 480,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 80,
  "save_steps": 500,
  "total_flos": 1.9681438145406566e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}