{
  "best_metric": 0.9117647058823529,
  "best_model_checkpoint": "deit-tiny-patch16-224-finetuned-papsmear/checkpoint-269",
  "epoch": 14.805194805194805,
  "eval_steps": 500,
  "global_step": 285,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5194805194805194,
      "grad_norm": 11.024985313415527,
      "learning_rate": 1.7241379310344828e-05,
      "loss": 0.3521,
      "step": 10
    },
    {
      "epoch": 0.987012987012987,
      "eval_accuracy": 0.8235294117647058,
      "eval_loss": 0.4595417082309723,
      "eval_runtime": 28.5401,
      "eval_samples_per_second": 4.765,
      "eval_steps_per_second": 0.315,
      "step": 19
    },
    {
      "epoch": 1.0389610389610389,
      "grad_norm": 14.813636779785156,
      "learning_rate": 3.4482758620689657e-05,
      "loss": 0.3115,
      "step": 20
    },
    {
      "epoch": 1.5584415584415585,
      "grad_norm": 16.087106704711914,
      "learning_rate": 4.9804687500000004e-05,
      "loss": 0.335,
      "step": 30
    },
    {
      "epoch": 1.974025974025974,
      "eval_accuracy": 0.8602941176470589,
      "eval_loss": 0.4491410553455353,
      "eval_runtime": 27.9762,
      "eval_samples_per_second": 4.861,
      "eval_steps_per_second": 0.322,
      "step": 38
    },
    {
      "epoch": 2.0779220779220777,
      "grad_norm": 19.62188720703125,
      "learning_rate": 4.78515625e-05,
      "loss": 0.4021,
      "step": 40
    },
    {
      "epoch": 2.5974025974025974,
      "grad_norm": 12.998429298400879,
      "learning_rate": 4.58984375e-05,
      "loss": 0.3248,
      "step": 50
    },
    {
      "epoch": 2.961038961038961,
      "eval_accuracy": 0.875,
      "eval_loss": 0.4195605218410492,
      "eval_runtime": 28.3986,
      "eval_samples_per_second": 4.789,
      "eval_steps_per_second": 0.317,
      "step": 57
    },
    {
      "epoch": 3.116883116883117,
      "grad_norm": 8.330426216125488,
      "learning_rate": 4.3945312500000005e-05,
      "loss": 0.3613,
      "step": 60
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 10.42120361328125,
      "learning_rate": 4.1992187500000003e-05,
      "loss": 0.3271,
      "step": 70
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8014705882352942,
      "eval_loss": 0.5466737151145935,
      "eval_runtime": 28.1607,
      "eval_samples_per_second": 4.829,
      "eval_steps_per_second": 0.32,
      "step": 77
    },
    {
      "epoch": 4.1558441558441555,
      "grad_norm": 14.403099060058594,
      "learning_rate": 4.00390625e-05,
      "loss": 0.3451,
      "step": 80
    },
    {
      "epoch": 4.675324675324675,
      "grad_norm": 18.443952560424805,
      "learning_rate": 3.80859375e-05,
      "loss": 0.3286,
      "step": 90
    },
    {
      "epoch": 4.987012987012987,
      "eval_accuracy": 0.8161764705882353,
      "eval_loss": 0.4768396317958832,
      "eval_runtime": 28.7873,
      "eval_samples_per_second": 4.724,
      "eval_steps_per_second": 0.313,
      "step": 96
    },
    {
      "epoch": 5.194805194805195,
      "grad_norm": 8.146430969238281,
      "learning_rate": 3.6132812500000005e-05,
      "loss": 0.2577,
      "step": 100
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 7.037234306335449,
      "learning_rate": 3.41796875e-05,
      "loss": 0.2854,
      "step": 110
    },
    {
      "epoch": 5.974025974025974,
      "eval_accuracy": 0.8676470588235294,
      "eval_loss": 0.41474443674087524,
      "eval_runtime": 28.6719,
      "eval_samples_per_second": 4.743,
      "eval_steps_per_second": 0.314,
      "step": 115
    },
    {
      "epoch": 6.233766233766234,
      "grad_norm": 10.498587608337402,
      "learning_rate": 3.22265625e-05,
      "loss": 0.2295,
      "step": 120
    },
    {
      "epoch": 6.753246753246753,
      "grad_norm": 13.865105628967285,
      "learning_rate": 3.02734375e-05,
      "loss": 0.2291,
      "step": 130
    },
    {
      "epoch": 6.961038961038961,
      "eval_accuracy": 0.8676470588235294,
      "eval_loss": 0.4321264624595642,
      "eval_runtime": 28.1356,
      "eval_samples_per_second": 4.834,
      "eval_steps_per_second": 0.32,
      "step": 134
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 14.981769561767578,
      "learning_rate": 2.83203125e-05,
      "loss": 0.2356,
      "step": 140
    },
    {
      "epoch": 7.792207792207792,
      "grad_norm": 17.98302459716797,
      "learning_rate": 2.63671875e-05,
      "loss": 0.2619,
      "step": 150
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8235294117647058,
      "eval_loss": 0.5726210474967957,
      "eval_runtime": 28.5909,
      "eval_samples_per_second": 4.757,
      "eval_steps_per_second": 0.315,
      "step": 154
    },
    {
      "epoch": 8.311688311688311,
      "grad_norm": 17.48947525024414,
      "learning_rate": 2.44140625e-05,
      "loss": 0.2315,
      "step": 160
    },
    {
      "epoch": 8.831168831168831,
      "grad_norm": 18.059572219848633,
      "learning_rate": 2.24609375e-05,
      "loss": 0.2196,
      "step": 170
    },
    {
      "epoch": 8.987012987012987,
      "eval_accuracy": 0.8676470588235294,
      "eval_loss": 0.4344201982021332,
      "eval_runtime": 28.8842,
      "eval_samples_per_second": 4.708,
      "eval_steps_per_second": 0.312,
      "step": 173
    },
    {
      "epoch": 9.35064935064935,
      "grad_norm": 13.211594581604004,
      "learning_rate": 2.05078125e-05,
      "loss": 0.22,
      "step": 180
    },
    {
      "epoch": 9.87012987012987,
      "grad_norm": 14.067967414855957,
      "learning_rate": 1.85546875e-05,
      "loss": 0.2116,
      "step": 190
    },
    {
      "epoch": 9.974025974025974,
      "eval_accuracy": 0.875,
      "eval_loss": 0.38092589378356934,
      "eval_runtime": 28.2538,
      "eval_samples_per_second": 4.814,
      "eval_steps_per_second": 0.319,
      "step": 192
    },
    {
      "epoch": 10.38961038961039,
      "grad_norm": 8.458534240722656,
      "learning_rate": 1.66015625e-05,
      "loss": 0.1843,
      "step": 200
    },
    {
      "epoch": 10.909090909090908,
      "grad_norm": 11.62593936920166,
      "learning_rate": 1.4648437500000001e-05,
      "loss": 0.1913,
      "step": 210
    },
    {
      "epoch": 10.96103896103896,
      "eval_accuracy": 0.8602941176470589,
      "eval_loss": 0.37573114037513733,
      "eval_runtime": 28.2614,
      "eval_samples_per_second": 4.812,
      "eval_steps_per_second": 0.318,
      "step": 211
    },
    {
      "epoch": 11.428571428571429,
      "grad_norm": 15.429533958435059,
      "learning_rate": 1.2695312500000001e-05,
      "loss": 0.1654,
      "step": 220
    },
    {
      "epoch": 11.948051948051948,
      "grad_norm": 8.531671524047852,
      "learning_rate": 1.0742187500000001e-05,
      "loss": 0.1604,
      "step": 230
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8897058823529411,
      "eval_loss": 0.35508498549461365,
      "eval_runtime": 28.7628,
      "eval_samples_per_second": 4.728,
      "eval_steps_per_second": 0.313,
      "step": 231
    },
    {
      "epoch": 12.467532467532468,
      "grad_norm": 7.988156318664551,
      "learning_rate": 8.789062500000001e-06,
      "loss": 0.1558,
      "step": 240
    },
    {
      "epoch": 12.987012987012987,
      "grad_norm": 6.887962341308594,
      "learning_rate": 6.8359375e-06,
      "loss": 0.1307,
      "step": 250
    },
    {
      "epoch": 12.987012987012987,
      "eval_accuracy": 0.8970588235294118,
      "eval_loss": 0.3330402374267578,
      "eval_runtime": 29.009,
      "eval_samples_per_second": 4.688,
      "eval_steps_per_second": 0.31,
      "step": 250
    },
    {
      "epoch": 13.506493506493506,
      "grad_norm": 7.4578070640563965,
      "learning_rate": 4.8828125e-06,
      "loss": 0.1425,
      "step": 260
    },
    {
      "epoch": 13.974025974025974,
      "eval_accuracy": 0.9117647058823529,
      "eval_loss": 0.34208229184150696,
      "eval_runtime": 28.868,
      "eval_samples_per_second": 4.711,
      "eval_steps_per_second": 0.312,
      "step": 269
    },
    {
      "epoch": 14.025974025974026,
      "grad_norm": 13.509991645812988,
      "learning_rate": 2.9296875e-06,
      "loss": 0.1368,
      "step": 270
    },
    {
      "epoch": 14.545454545454545,
      "grad_norm": 9.896185874938965,
      "learning_rate": 9.765625e-07,
      "loss": 0.141,
      "step": 280
    },
    {
      "epoch": 14.805194805194805,
      "eval_accuracy": 0.9117647058823529,
      "eval_loss": 0.3409328758716583,
      "eval_runtime": 28.479,
      "eval_samples_per_second": 4.775,
      "eval_steps_per_second": 0.316,
      "step": 285
    },
    {
      "epoch": 14.805194805194805,
      "step": 285,
      "total_flos": 9.04681758989353e+16,
      "train_loss": 0.2430994153022766,
      "train_runtime": 4010.606,
      "train_samples_per_second": 4.578,
      "train_steps_per_second": 0.071
    }
  ],
  "logging_steps": 10,
  "max_steps": 285,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.04681758989353e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}