{
  "best_metric": 0.8732394366197183,
  "best_model_checkpoint": "deit-base-distilled-patch16-224-65-fold1/checkpoint-201",
  "epoch": 92.3076923076923,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9230769230769231,
      "eval_accuracy": 0.49295774647887325,
      "eval_loss": 0.7887781858444214,
      "eval_runtime": 1.0049,
      "eval_samples_per_second": 70.656,
      "eval_steps_per_second": 2.985,
      "step": 3
    },
    {
      "epoch": 1.8461538461538463,
      "eval_accuracy": 0.5070422535211268,
      "eval_loss": 0.7159498333930969,
      "eval_runtime": 0.9626,
      "eval_samples_per_second": 73.757,
      "eval_steps_per_second": 3.116,
      "step": 6
    },
    {
      "epoch": 2.769230769230769,
      "eval_accuracy": 0.5070422535211268,
      "eval_loss": 0.7091380953788757,
      "eval_runtime": 0.9551,
      "eval_samples_per_second": 74.336,
      "eval_steps_per_second": 3.141,
      "step": 9
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 5.692840576171875,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.7703,
      "step": 10
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5352112676056338,
      "eval_loss": 0.6907549500465393,
      "eval_runtime": 0.9654,
      "eval_samples_per_second": 73.543,
      "eval_steps_per_second": 3.107,
      "step": 13
    },
    {
      "epoch": 4.923076923076923,
      "eval_accuracy": 0.6197183098591549,
      "eval_loss": 0.6527045965194702,
      "eval_runtime": 0.978,
      "eval_samples_per_second": 72.596,
      "eval_steps_per_second": 3.067,
      "step": 16
    },
    {
      "epoch": 5.846153846153846,
      "eval_accuracy": 0.7323943661971831,
      "eval_loss": 0.6236115097999573,
      "eval_runtime": 0.972,
      "eval_samples_per_second": 73.044,
      "eval_steps_per_second": 3.086,
      "step": 19
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 4.818113327026367,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6435,
      "step": 20
    },
    {
      "epoch": 6.769230769230769,
      "eval_accuracy": 0.6901408450704225,
      "eval_loss": 0.6357130408287048,
      "eval_runtime": 0.9775,
      "eval_samples_per_second": 72.631,
      "eval_steps_per_second": 3.069,
      "step": 22
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.704225352112676,
      "eval_loss": 0.5442065596580505,
      "eval_runtime": 1.0045,
      "eval_samples_per_second": 70.683,
      "eval_steps_per_second": 2.987,
      "step": 26
    },
    {
      "epoch": 8.923076923076923,
      "eval_accuracy": 0.7183098591549296,
      "eval_loss": 0.5448713898658752,
      "eval_runtime": 0.9838,
      "eval_samples_per_second": 72.166,
      "eval_steps_per_second": 3.049,
      "step": 29
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 3.4911949634552,
      "learning_rate": 5e-05,
      "loss": 0.5366,
      "step": 30
    },
    {
      "epoch": 9.846153846153847,
      "eval_accuracy": 0.7464788732394366,
      "eval_loss": 0.5123764872550964,
      "eval_runtime": 0.9995,
      "eval_samples_per_second": 71.034,
      "eval_steps_per_second": 3.001,
      "step": 32
    },
    {
      "epoch": 10.76923076923077,
      "eval_accuracy": 0.704225352112676,
      "eval_loss": 0.5028589367866516,
      "eval_runtime": 0.9867,
      "eval_samples_per_second": 71.958,
      "eval_steps_per_second": 3.04,
      "step": 35
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.7183098591549296,
      "eval_loss": 0.5486107468605042,
      "eval_runtime": 0.9813,
      "eval_samples_per_second": 72.355,
      "eval_steps_per_second": 3.057,
      "step": 39
    },
    {
      "epoch": 12.307692307692308,
      "grad_norm": 6.1214070320129395,
      "learning_rate": 4.814814814814815e-05,
      "loss": 0.4577,
      "step": 40
    },
    {
      "epoch": 12.923076923076923,
      "eval_accuracy": 0.676056338028169,
      "eval_loss": 0.5393596291542053,
      "eval_runtime": 0.9861,
      "eval_samples_per_second": 71.997,
      "eval_steps_per_second": 3.042,
      "step": 42
    },
    {
      "epoch": 13.846153846153847,
      "eval_accuracy": 0.7464788732394366,
      "eval_loss": 0.5510964393615723,
      "eval_runtime": 0.9874,
      "eval_samples_per_second": 71.909,
      "eval_steps_per_second": 3.038,
      "step": 45
    },
    {
      "epoch": 14.76923076923077,
      "eval_accuracy": 0.6901408450704225,
      "eval_loss": 0.5794062614440918,
      "eval_runtime": 0.9882,
      "eval_samples_per_second": 71.849,
      "eval_steps_per_second": 3.036,
      "step": 48
    },
    {
      "epoch": 15.384615384615385,
      "grad_norm": 4.4983015060424805,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.4187,
      "step": 50
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.7323943661971831,
      "eval_loss": 0.53675776720047,
      "eval_runtime": 1.0161,
      "eval_samples_per_second": 69.878,
      "eval_steps_per_second": 2.953,
      "step": 52
    },
    {
      "epoch": 16.923076923076923,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.4677673578262329,
      "eval_runtime": 0.9871,
      "eval_samples_per_second": 71.928,
      "eval_steps_per_second": 3.039,
      "step": 55
    },
    {
      "epoch": 17.846153846153847,
      "eval_accuracy": 0.704225352112676,
      "eval_loss": 0.6597200036048889,
      "eval_runtime": 1.0221,
      "eval_samples_per_second": 69.468,
      "eval_steps_per_second": 2.935,
      "step": 58
    },
    {
      "epoch": 18.46153846153846,
      "grad_norm": 4.643094539642334,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.3542,
      "step": 60
    },
    {
      "epoch": 18.76923076923077,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.4969327747821808,
      "eval_runtime": 0.9908,
      "eval_samples_per_second": 71.661,
      "eval_steps_per_second": 3.028,
      "step": 61
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.7323943661971831,
      "eval_loss": 0.7102980017662048,
      "eval_runtime": 0.9798,
      "eval_samples_per_second": 72.463,
      "eval_steps_per_second": 3.062,
      "step": 65
    },
    {
      "epoch": 20.923076923076923,
      "eval_accuracy": 0.7605633802816901,
      "eval_loss": 0.49788960814476013,
      "eval_runtime": 0.9857,
      "eval_samples_per_second": 72.032,
      "eval_steps_per_second": 3.044,
      "step": 68
    },
    {
      "epoch": 21.53846153846154,
      "grad_norm": 3.1563220024108887,
      "learning_rate": 4.259259259259259e-05,
      "loss": 0.3057,
      "step": 70
    },
    {
      "epoch": 21.846153846153847,
      "eval_accuracy": 0.7323943661971831,
      "eval_loss": 0.5270604491233826,
      "eval_runtime": 0.9884,
      "eval_samples_per_second": 71.835,
      "eval_steps_per_second": 3.035,
      "step": 71
    },
    {
      "epoch": 22.76923076923077,
      "eval_accuracy": 0.7746478873239436,
      "eval_loss": 0.5357353091239929,
      "eval_runtime": 0.9814,
      "eval_samples_per_second": 72.342,
      "eval_steps_per_second": 3.057,
      "step": 74
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.48468706011772156,
      "eval_runtime": 0.9993,
      "eval_samples_per_second": 71.047,
      "eval_steps_per_second": 3.002,
      "step": 78
    },
    {
      "epoch": 24.615384615384617,
      "grad_norm": 6.270655632019043,
      "learning_rate": 4.074074074074074e-05,
      "loss": 0.2816,
      "step": 80
    },
    {
      "epoch": 24.923076923076923,
      "eval_accuracy": 0.8309859154929577,
      "eval_loss": 0.5424976944923401,
      "eval_runtime": 0.9807,
      "eval_samples_per_second": 72.4,
      "eval_steps_per_second": 3.059,
      "step": 81
    },
    {
      "epoch": 25.846153846153847,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.5239164233207703,
      "eval_runtime": 1.0158,
      "eval_samples_per_second": 69.897,
      "eval_steps_per_second": 2.953,
      "step": 84
    },
    {
      "epoch": 26.76923076923077,
      "eval_accuracy": 0.8309859154929577,
      "eval_loss": 0.4140526354312897,
      "eval_runtime": 0.9875,
      "eval_samples_per_second": 71.897,
      "eval_steps_per_second": 3.038,
      "step": 87
    },
    {
      "epoch": 27.692307692307693,
      "grad_norm": 3.314903497695923,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.2881,
      "step": 90
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.4996558725833893,
      "eval_runtime": 0.9975,
      "eval_samples_per_second": 71.178,
      "eval_steps_per_second": 3.008,
      "step": 91
    },
    {
      "epoch": 28.923076923076923,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.42159879207611084,
      "eval_runtime": 0.983,
      "eval_samples_per_second": 72.227,
      "eval_steps_per_second": 3.052,
      "step": 94
    },
    {
      "epoch": 29.846153846153847,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.46680712699890137,
      "eval_runtime": 0.9908,
      "eval_samples_per_second": 71.663,
      "eval_steps_per_second": 3.028,
      "step": 97
    },
    {
      "epoch": 30.76923076923077,
      "grad_norm": 2.419731616973877,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.2421,
      "step": 100
    },
    {
      "epoch": 30.76923076923077,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.5904483199119568,
      "eval_runtime": 0.9786,
      "eval_samples_per_second": 72.553,
      "eval_steps_per_second": 3.066,
      "step": 100
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.7746478873239436,
      "eval_loss": 0.5239621996879578,
      "eval_runtime": 0.9902,
      "eval_samples_per_second": 71.7,
      "eval_steps_per_second": 3.03,
      "step": 104
    },
    {
      "epoch": 32.92307692307692,
      "eval_accuracy": 0.7605633802816901,
      "eval_loss": 0.99370938539505,
      "eval_runtime": 0.9918,
      "eval_samples_per_second": 71.586,
      "eval_steps_per_second": 3.025,
      "step": 107
    },
    {
      "epoch": 33.84615384615385,
      "grad_norm": 2.3661601543426514,
      "learning_rate": 3.518518518518519e-05,
      "loss": 0.2402,
      "step": 110
    },
    {
      "epoch": 33.84615384615385,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.4988890588283539,
      "eval_runtime": 0.9897,
      "eval_samples_per_second": 71.739,
      "eval_steps_per_second": 3.031,
      "step": 110
    },
    {
      "epoch": 34.76923076923077,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.7231740951538086,
      "eval_runtime": 0.9872,
      "eval_samples_per_second": 71.923,
      "eval_steps_per_second": 3.039,
      "step": 113
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.8450704225352113,
      "eval_loss": 0.4815123677253723,
      "eval_runtime": 0.9925,
      "eval_samples_per_second": 71.533,
      "eval_steps_per_second": 3.023,
      "step": 117
    },
    {
      "epoch": 36.92307692307692,
      "grad_norm": 2.6437292098999023,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.1862,
      "step": 120
    },
    {
      "epoch": 36.92307692307692,
      "eval_accuracy": 0.7746478873239436,
      "eval_loss": 0.7431491017341614,
      "eval_runtime": 0.9913,
      "eval_samples_per_second": 71.622,
      "eval_steps_per_second": 3.026,
      "step": 120
    },
    {
      "epoch": 37.84615384615385,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.44340020418167114,
      "eval_runtime": 0.9934,
      "eval_samples_per_second": 71.475,
      "eval_steps_per_second": 3.02,
      "step": 123
    },
    {
      "epoch": 38.76923076923077,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.4759599268436432,
      "eval_runtime": 0.9853,
      "eval_samples_per_second": 72.061,
      "eval_steps_per_second": 3.045,
      "step": 126
    },
    {
      "epoch": 40.0,
      "grad_norm": 2.1550378799438477,
      "learning_rate": 3.148148148148148e-05,
      "loss": 0.1783,
      "step": 130
    },
    {
      "epoch": 40.0,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.5006221532821655,
      "eval_runtime": 0.9897,
      "eval_samples_per_second": 71.737,
      "eval_steps_per_second": 3.031,
      "step": 130
    },
    {
      "epoch": 40.92307692307692,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.49856680631637573,
      "eval_runtime": 0.9788,
      "eval_samples_per_second": 72.535,
      "eval_steps_per_second": 3.065,
      "step": 133
    },
    {
      "epoch": 41.84615384615385,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.794731616973877,
      "eval_runtime": 0.9805,
      "eval_samples_per_second": 72.414,
      "eval_steps_per_second": 3.06,
      "step": 136
    },
    {
      "epoch": 42.76923076923077,
      "eval_accuracy": 0.8309859154929577,
      "eval_loss": 0.48966798186302185,
      "eval_runtime": 0.9856,
      "eval_samples_per_second": 72.035,
      "eval_steps_per_second": 3.044,
      "step": 139
    },
    {
      "epoch": 43.07692307692308,
      "grad_norm": 4.09879207611084,
      "learning_rate": 2.962962962962963e-05,
      "loss": 0.1685,
      "step": 140
    },
    {
      "epoch": 44.0,
      "eval_accuracy": 0.7605633802816901,
      "eval_loss": 0.7500377893447876,
      "eval_runtime": 0.9859,
      "eval_samples_per_second": 72.013,
      "eval_steps_per_second": 3.043,
      "step": 143
    },
    {
      "epoch": 44.92307692307692,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.6052554249763489,
      "eval_runtime": 0.9867,
      "eval_samples_per_second": 71.954,
      "eval_steps_per_second": 3.04,
      "step": 146
    },
    {
      "epoch": 45.84615384615385,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.4776698350906372,
      "eval_runtime": 0.9829,
      "eval_samples_per_second": 72.233,
      "eval_steps_per_second": 3.052,
      "step": 149
    },
    {
      "epoch": 46.15384615384615,
      "grad_norm": 2.7736315727233887,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.1779,
      "step": 150
    },
    {
      "epoch": 46.76923076923077,
      "eval_accuracy": 0.7746478873239436,
      "eval_loss": 0.5799605250358582,
      "eval_runtime": 0.9783,
      "eval_samples_per_second": 72.578,
      "eval_steps_per_second": 3.067,
      "step": 152
    },
    {
      "epoch": 48.0,
      "eval_accuracy": 0.8450704225352113,
      "eval_loss": 0.4681435227394104,
      "eval_runtime": 0.984,
      "eval_samples_per_second": 72.155,
      "eval_steps_per_second": 3.049,
      "step": 156
    },
    {
      "epoch": 48.92307692307692,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.7729381322860718,
      "eval_runtime": 0.9844,
      "eval_samples_per_second": 72.123,
      "eval_steps_per_second": 3.047,
      "step": 159
    },
    {
      "epoch": 49.23076923076923,
      "grad_norm": 6.783754825592041,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 0.1502,
      "step": 160
    },
    {
      "epoch": 49.84615384615385,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.6486974358558655,
      "eval_runtime": 0.9835,
      "eval_samples_per_second": 72.191,
      "eval_steps_per_second": 3.05,
      "step": 162
    },
    {
      "epoch": 50.76923076923077,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.5224327445030212,
      "eval_runtime": 0.982,
      "eval_samples_per_second": 72.3,
      "eval_steps_per_second": 3.055,
      "step": 165
    },
    {
      "epoch": 52.0,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.7017256021499634,
      "eval_runtime": 1.0037,
      "eval_samples_per_second": 70.739,
      "eval_steps_per_second": 2.989,
      "step": 169
    },
    {
      "epoch": 52.30769230769231,
      "grad_norm": 2.59287428855896,
      "learning_rate": 2.4074074074074074e-05,
      "loss": 0.1586,
      "step": 170
    },
    {
      "epoch": 52.92307692307692,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.603446364402771,
      "eval_runtime": 1.0504,
      "eval_samples_per_second": 67.596,
      "eval_steps_per_second": 2.856,
      "step": 172
    },
    {
      "epoch": 53.84615384615385,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.5791419744491577,
      "eval_runtime": 1.0306,
      "eval_samples_per_second": 68.891,
      "eval_steps_per_second": 2.911,
      "step": 175
    },
    {
      "epoch": 54.76923076923077,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.565058708190918,
      "eval_runtime": 0.9801,
      "eval_samples_per_second": 72.445,
      "eval_steps_per_second": 3.061,
      "step": 178
    },
    {
      "epoch": 55.38461538461539,
      "grad_norm": 3.497166872024536,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.134,
      "step": 180
    },
    {
      "epoch": 56.0,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.486237496137619,
      "eval_runtime": 0.98,
      "eval_samples_per_second": 72.446,
      "eval_steps_per_second": 3.061,
      "step": 182
    },
    {
      "epoch": 56.92307692307692,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.6750636100769043,
      "eval_runtime": 0.9858,
      "eval_samples_per_second": 72.026,
      "eval_steps_per_second": 3.043,
      "step": 185
    },
    {
      "epoch": 57.84615384615385,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.5924672484397888,
      "eval_runtime": 1.0279,
      "eval_samples_per_second": 69.075,
      "eval_steps_per_second": 2.919,
      "step": 188
    },
    {
      "epoch": 58.46153846153846,
      "grad_norm": 1.8251292705535889,
      "learning_rate": 2.037037037037037e-05,
      "loss": 0.1602,
      "step": 190
    },
    {
      "epoch": 58.76923076923077,
      "eval_accuracy": 0.8450704225352113,
      "eval_loss": 0.39819350838661194,
      "eval_runtime": 0.9991,
      "eval_samples_per_second": 71.061,
      "eval_steps_per_second": 3.003,
      "step": 191
    },
    {
      "epoch": 60.0,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.5968966484069824,
      "eval_runtime": 1.0038,
      "eval_samples_per_second": 70.73,
      "eval_steps_per_second": 2.989,
      "step": 195
    },
    {
      "epoch": 60.92307692307692,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.5720605254173279,
      "eval_runtime": 0.9693,
      "eval_samples_per_second": 73.252,
      "eval_steps_per_second": 3.095,
      "step": 198
    },
    {
      "epoch": 61.53846153846154,
      "grad_norm": 1.8045357465744019,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.1217,
      "step": 200
    },
    {
      "epoch": 61.84615384615385,
      "eval_accuracy": 0.8732394366197183,
      "eval_loss": 0.38157418370246887,
      "eval_runtime": 0.9841,
      "eval_samples_per_second": 72.148,
      "eval_steps_per_second": 3.048,
      "step": 201
    },
    {
      "epoch": 62.76923076923077,
      "eval_accuracy": 0.8309859154929577,
      "eval_loss": 0.4109618365764618,
      "eval_runtime": 0.9741,
      "eval_samples_per_second": 72.89,
      "eval_steps_per_second": 3.08,
      "step": 204
    },
    {
      "epoch": 64.0,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.6715837717056274,
      "eval_runtime": 0.9934,
      "eval_samples_per_second": 71.469,
      "eval_steps_per_second": 3.02,
      "step": 208
    },
    {
      "epoch": 64.61538461538461,
      "grad_norm": 4.87626838684082,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.1274,
      "step": 210
    },
    {
      "epoch": 64.92307692307692,
      "eval_accuracy": 0.8732394366197183,
      "eval_loss": 0.34985408186912537,
      "eval_runtime": 0.9766,
      "eval_samples_per_second": 72.704,
      "eval_steps_per_second": 3.072,
      "step": 211
    },
    {
      "epoch": 65.84615384615384,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.36712145805358887,
      "eval_runtime": 0.9825,
      "eval_samples_per_second": 72.266,
      "eval_steps_per_second": 3.053,
      "step": 214
    },
    {
      "epoch": 66.76923076923077,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.5317980051040649,
      "eval_runtime": 0.9694,
      "eval_samples_per_second": 73.245,
      "eval_steps_per_second": 3.095,
      "step": 217
    },
    {
      "epoch": 67.6923076923077,
      "grad_norm": 1.64058518409729,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.1277,
      "step": 220
    },
    {
      "epoch": 68.0,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.6733711361885071,
      "eval_runtime": 0.9874,
      "eval_samples_per_second": 71.905,
      "eval_steps_per_second": 3.038,
      "step": 221
    },
    {
      "epoch": 68.92307692307692,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.47263458371162415,
      "eval_runtime": 0.9771,
      "eval_samples_per_second": 72.665,
      "eval_steps_per_second": 3.07,
      "step": 224
    },
    {
      "epoch": 69.84615384615384,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.4311037063598633,
      "eval_runtime": 0.9776,
      "eval_samples_per_second": 72.63,
      "eval_steps_per_second": 3.069,
      "step": 227
    },
    {
      "epoch": 70.76923076923077,
      "grad_norm": 2.334254026412964,
      "learning_rate": 1.2962962962962962e-05,
      "loss": 0.1232,
      "step": 230
    },
    {
      "epoch": 70.76923076923077,
      "eval_accuracy": 0.7746478873239436,
      "eval_loss": 0.7072361707687378,
      "eval_runtime": 0.969,
      "eval_samples_per_second": 73.27,
      "eval_steps_per_second": 3.096,
      "step": 230
    },
    {
      "epoch": 72.0,
      "eval_accuracy": 0.7887323943661971,
      "eval_loss": 0.5859442949295044,
      "eval_runtime": 0.9757,
      "eval_samples_per_second": 72.767,
      "eval_steps_per_second": 3.075,
      "step": 234
    },
    {
      "epoch": 72.92307692307692,
      "eval_accuracy": 0.8309859154929577,
      "eval_loss": 0.3757818341255188,
      "eval_runtime": 0.9776,
      "eval_samples_per_second": 72.628,
      "eval_steps_per_second": 3.069,
      "step": 237
    },
    {
      "epoch": 73.84615384615384,
      "grad_norm": 1.5687333345413208,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.1293,
      "step": 240
    },
    {
      "epoch": 73.84615384615384,
      "eval_accuracy": 0.8450704225352113,
      "eval_loss": 0.3672768175601959,
      "eval_runtime": 0.977,
      "eval_samples_per_second": 72.668,
      "eval_steps_per_second": 3.07,
      "step": 240
    },
    {
      "epoch": 74.76923076923077,
      "eval_accuracy": 0.8591549295774648,
      "eval_loss": 0.367299884557724,
      "eval_runtime": 0.9773,
      "eval_samples_per_second": 72.646,
      "eval_steps_per_second": 3.07,
      "step": 243
    },
    {
      "epoch": 76.0,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.47515830397605896,
      "eval_runtime": 0.9705,
      "eval_samples_per_second": 73.158,
      "eval_steps_per_second": 3.091,
      "step": 247
    },
    {
      "epoch": 76.92307692307692,
      "grad_norm": 2.638277053833008,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.1117,
      "step": 250
    },
    {
      "epoch": 76.92307692307692,
      "eval_accuracy": 0.8309859154929577,
      "eval_loss": 0.44498294591903687,
      "eval_runtime": 0.9801,
      "eval_samples_per_second": 72.44,
      "eval_steps_per_second": 3.061,
      "step": 250
    },
    {
      "epoch": 77.84615384615384,
      "eval_accuracy": 0.8450704225352113,
      "eval_loss": 0.44371137022972107,
      "eval_runtime": 0.9778,
      "eval_samples_per_second": 72.608,
      "eval_steps_per_second": 3.068,
      "step": 253
    },
    {
      "epoch": 78.76923076923077,
      "eval_accuracy": 0.8309859154929577,
      "eval_loss": 0.4329843521118164,
      "eval_runtime": 0.9979,
      "eval_samples_per_second": 71.146,
      "eval_steps_per_second": 3.006,
      "step": 256
    },
    {
      "epoch": 80.0,
      "grad_norm": 2.258843421936035,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.1092,
      "step": 260
    },
    {
      "epoch": 80.0,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.5095254182815552,
      "eval_runtime": 0.9679,
      "eval_samples_per_second": 73.356,
      "eval_steps_per_second": 3.1,
      "step": 260
    },
    {
      "epoch": 80.92307692307692,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.49475494027137756,
      "eval_runtime": 0.9923,
      "eval_samples_per_second": 71.549,
      "eval_steps_per_second": 3.023,
      "step": 263
    },
    {
      "epoch": 81.84615384615384,
      "eval_accuracy": 0.8591549295774648,
      "eval_loss": 0.413480281829834,
      "eval_runtime": 0.9737,
      "eval_samples_per_second": 72.914,
      "eval_steps_per_second": 3.081,
      "step": 266
    },
    {
      "epoch": 82.76923076923077,
      "eval_accuracy": 0.8450704225352113,
      "eval_loss": 0.4189954698085785,
      "eval_runtime": 0.9819,
      "eval_samples_per_second": 72.311,
      "eval_steps_per_second": 3.055,
      "step": 269
    },
    {
      "epoch": 83.07692307692308,
      "grad_norm": 2.083444595336914,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.1151,
      "step": 270
    },
    {
      "epoch": 84.0,
      "eval_accuracy": 0.8732394366197183,
      "eval_loss": 0.41941961646080017,
      "eval_runtime": 0.9662,
      "eval_samples_per_second": 73.482,
      "eval_steps_per_second": 3.105,
      "step": 273
    },
    {
      "epoch": 84.92307692307692,
      "eval_accuracy": 0.8309859154929577,
      "eval_loss": 0.43557149171829224,
      "eval_runtime": 0.9875,
      "eval_samples_per_second": 71.901,
      "eval_steps_per_second": 3.038,
      "step": 276
    },
    {
      "epoch": 85.84615384615384,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.46227431297302246,
      "eval_runtime": 0.9652,
      "eval_samples_per_second": 73.558,
      "eval_steps_per_second": 3.108,
      "step": 279
    },
    {
      "epoch": 86.15384615384616,
      "grad_norm": 3.552259683609009,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 0.1085,
      "step": 280
    },
    {
      "epoch": 86.76923076923077,
      "eval_accuracy": 0.8309859154929577,
      "eval_loss": 0.48452523350715637,
      "eval_runtime": 0.9766,
      "eval_samples_per_second": 72.702,
      "eval_steps_per_second": 3.072,
      "step": 282
    },
    {
      "epoch": 88.0,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.49984505772590637,
      "eval_runtime": 0.9733,
      "eval_samples_per_second": 72.949,
      "eval_steps_per_second": 3.082,
      "step": 286
    },
    {
      "epoch": 88.92307692307692,
      "eval_accuracy": 0.8028169014084507,
      "eval_loss": 0.5181298851966858,
      "eval_runtime": 0.9778,
      "eval_samples_per_second": 72.612,
      "eval_steps_per_second": 3.068,
      "step": 289
    },
    {
      "epoch": 89.23076923076923,
      "grad_norm": 3.29520583152771,
      "learning_rate": 1.8518518518518519e-06,
      "loss": 0.0908,
      "step": 290
    },
    {
      "epoch": 89.84615384615384,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.5373069643974304,
      "eval_runtime": 0.9643,
      "eval_samples_per_second": 73.627,
      "eval_steps_per_second": 3.111,
      "step": 292
    },
    {
      "epoch": 90.76923076923077,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.5465375185012817,
      "eval_runtime": 0.9916,
      "eval_samples_per_second": 71.6,
      "eval_steps_per_second": 3.025,
      "step": 295
    },
    {
      "epoch": 92.0,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.5421547889709473,
      "eval_runtime": 0.9804,
      "eval_samples_per_second": 72.422,
      "eval_steps_per_second": 3.06,
      "step": 299
    },
    {
      "epoch": 92.3076923076923,
      "grad_norm": 1.8515039682388306,
      "learning_rate": 0.0,
      "loss": 0.0902,
      "step": 300
    },
    {
      "epoch": 92.3076923076923,
      "eval_accuracy": 0.8169014084507042,
      "eval_loss": 0.5417219996452332,
      "eval_runtime": 0.9779,
      "eval_samples_per_second": 72.602,
      "eval_steps_per_second": 3.068,
      "step": 300
    },
    {
      "epoch": 92.3076923076923,
      "step": 300,
      "total_flos": 2.8402872494292173e+18,
      "train_loss": 0.23691350758075713,
      "train_runtime": 1617.7362,
      "train_samples_per_second": 24.54,
      "train_steps_per_second": 0.185
    },
    {
      "epoch": 92.3076923076923,
      "eval_accuracy": 0.8732394366197183,
      "eval_loss": 0.38157418370246887,
      "eval_runtime": 1.0366,
      "eval_samples_per_second": 68.495,
      "eval_steps_per_second": 2.894,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 300,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.8402872494292173e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}