{
  "best_metric": 0.7121951219512195,
  "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-finetuned-gardner-exp-max/checkpoint-159",
  "epoch": 24.137931034482758,
  "eval_steps": 500,
  "global_step": 350,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.69,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 1.6068,
      "step": 10
    },
    {
      "epoch": 0.97,
      "eval_accuracy": 0.5414634146341464,
      "eval_loss": 1.5808794498443604,
      "eval_runtime": 3.5265,
      "eval_samples_per_second": 58.131,
      "eval_steps_per_second": 1.985,
      "step": 14
    },
    {
      "epoch": 1.38,
      "learning_rate": 2.857142857142857e-05,
      "loss": 1.56,
      "step": 20
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5414634146341464,
      "eval_loss": 1.2830290794372559,
      "eval_runtime": 3.4914,
      "eval_samples_per_second": 58.716,
      "eval_steps_per_second": 2.005,
      "step": 29
    },
    {
      "epoch": 2.07,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 1.3595,
      "step": 30
    },
    {
      "epoch": 2.76,
      "learning_rate": 4.9206349206349204e-05,
      "loss": 1.1852,
      "step": 40
    },
    {
      "epoch": 2.97,
      "eval_accuracy": 0.5414634146341464,
      "eval_loss": 1.0793886184692383,
      "eval_runtime": 3.5623,
      "eval_samples_per_second": 57.547,
      "eval_steps_per_second": 1.965,
      "step": 43
    },
    {
      "epoch": 3.45,
      "learning_rate": 4.761904761904762e-05,
      "loss": 1.1132,
      "step": 50
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6487804878048781,
      "eval_loss": 0.9314356446266174,
      "eval_runtime": 3.4589,
      "eval_samples_per_second": 59.268,
      "eval_steps_per_second": 2.024,
      "step": 58
    },
    {
      "epoch": 4.14,
      "learning_rate": 4.603174603174603e-05,
      "loss": 1.024,
      "step": 60
    },
    {
      "epoch": 4.83,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.9416,
      "step": 70
    },
    {
      "epoch": 4.97,
      "eval_accuracy": 0.6341463414634146,
      "eval_loss": 0.8935254216194153,
      "eval_runtime": 3.4888,
      "eval_samples_per_second": 58.759,
      "eval_steps_per_second": 2.006,
      "step": 72
    },
    {
      "epoch": 5.52,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.9143,
      "step": 80
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6829268292682927,
      "eval_loss": 0.8009229898452759,
      "eval_runtime": 3.465,
      "eval_samples_per_second": 59.163,
      "eval_steps_per_second": 2.02,
      "step": 87
    },
    {
      "epoch": 6.21,
      "learning_rate": 4.126984126984127e-05,
      "loss": 0.8868,
      "step": 90
    },
    {
      "epoch": 6.9,
      "learning_rate": 3.968253968253968e-05,
      "loss": 0.8243,
      "step": 100
    },
    {
      "epoch": 6.97,
      "eval_accuracy": 0.6634146341463415,
      "eval_loss": 0.8067137002944946,
      "eval_runtime": 3.5324,
      "eval_samples_per_second": 58.034,
      "eval_steps_per_second": 1.982,
      "step": 101
    },
    {
      "epoch": 7.59,
      "learning_rate": 3.809523809523809e-05,
      "loss": 0.8171,
      "step": 110
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6780487804878049,
      "eval_loss": 0.7783121466636658,
      "eval_runtime": 3.4599,
      "eval_samples_per_second": 59.25,
      "eval_steps_per_second": 2.023,
      "step": 116
    },
    {
      "epoch": 8.28,
      "learning_rate": 3.650793650793651e-05,
      "loss": 0.8161,
      "step": 120
    },
    {
      "epoch": 8.97,
      "learning_rate": 3.492063492063492e-05,
      "loss": 0.7901,
      "step": 130
    },
    {
      "epoch": 8.97,
      "eval_accuracy": 0.6585365853658537,
      "eval_loss": 0.7871080636978149,
      "eval_runtime": 3.4561,
      "eval_samples_per_second": 59.316,
      "eval_steps_per_second": 2.025,
      "step": 130
    },
    {
      "epoch": 9.66,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.7944,
      "step": 140
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.697560975609756,
      "eval_loss": 0.7413551211357117,
      "eval_runtime": 3.4339,
      "eval_samples_per_second": 59.699,
      "eval_steps_per_second": 2.038,
      "step": 145
    },
    {
      "epoch": 10.34,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 0.7669,
      "step": 150
    },
    {
      "epoch": 10.97,
      "eval_accuracy": 0.7121951219512195,
      "eval_loss": 0.6976904273033142,
      "eval_runtime": 3.4718,
      "eval_samples_per_second": 59.047,
      "eval_steps_per_second": 2.016,
      "step": 159
    },
    {
      "epoch": 11.03,
      "learning_rate": 3.0158730158730158e-05,
      "loss": 0.7961,
      "step": 160
    },
    {
      "epoch": 11.72,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.7478,
      "step": 170
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.7121951219512195,
      "eval_loss": 0.7042645812034607,
      "eval_runtime": 3.4485,
      "eval_samples_per_second": 59.447,
      "eval_steps_per_second": 2.03,
      "step": 174
    },
    {
      "epoch": 12.41,
      "learning_rate": 2.6984126984126984e-05,
      "loss": 0.766,
      "step": 180
    },
    {
      "epoch": 12.97,
      "eval_accuracy": 0.6585365853658537,
      "eval_loss": 0.7778439521789551,
      "eval_runtime": 3.6047,
      "eval_samples_per_second": 56.87,
      "eval_steps_per_second": 1.942,
      "step": 188
    },
    {
      "epoch": 13.1,
      "learning_rate": 2.5396825396825397e-05,
      "loss": 0.7691,
      "step": 190
    },
    {
      "epoch": 13.79,
      "learning_rate": 2.380952380952381e-05,
      "loss": 0.7322,
      "step": 200
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.6780487804878049,
      "eval_loss": 0.7503620386123657,
      "eval_runtime": 3.4523,
      "eval_samples_per_second": 59.381,
      "eval_steps_per_second": 2.028,
      "step": 203
    },
    {
      "epoch": 14.48,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.7242,
      "step": 210
    },
    {
      "epoch": 14.97,
      "eval_accuracy": 0.6829268292682927,
      "eval_loss": 0.7290918827056885,
      "eval_runtime": 3.5373,
      "eval_samples_per_second": 57.954,
      "eval_steps_per_second": 1.979,
      "step": 217
    },
    {
      "epoch": 15.17,
      "learning_rate": 2.0634920634920636e-05,
      "loss": 0.7172,
      "step": 220
    },
    {
      "epoch": 15.86,
      "learning_rate": 1.9047619047619046e-05,
      "loss": 0.7554,
      "step": 230
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.6634146341463415,
      "eval_loss": 0.7694374918937683,
      "eval_runtime": 3.4718,
      "eval_samples_per_second": 59.047,
      "eval_steps_per_second": 2.016,
      "step": 232
    },
    {
      "epoch": 16.55,
      "learning_rate": 1.746031746031746e-05,
      "loss": 0.7422,
      "step": 240
    },
    {
      "epoch": 16.97,
      "eval_accuracy": 0.6829268292682927,
      "eval_loss": 0.7568630576133728,
      "eval_runtime": 3.5281,
      "eval_samples_per_second": 58.104,
      "eval_steps_per_second": 1.984,
      "step": 246
    },
    {
      "epoch": 17.24,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 0.7324,
      "step": 250
    },
    {
      "epoch": 17.93,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.7292,
      "step": 260
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.6780487804878049,
      "eval_loss": 0.7389385104179382,
      "eval_runtime": 3.4574,
      "eval_samples_per_second": 59.292,
      "eval_steps_per_second": 2.025,
      "step": 261
    },
    {
      "epoch": 18.62,
      "learning_rate": 1.2698412698412699e-05,
      "loss": 0.7354,
      "step": 270
    },
    {
      "epoch": 18.97,
      "eval_accuracy": 0.7121951219512195,
      "eval_loss": 0.668440580368042,
      "eval_runtime": 3.6943,
      "eval_samples_per_second": 55.491,
      "eval_steps_per_second": 1.895,
      "step": 275
    },
    {
      "epoch": 19.31,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.7274,
      "step": 280
    },
    {
      "epoch": 20.0,
      "learning_rate": 9.523809523809523e-06,
      "loss": 0.6847,
      "step": 290
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.7121951219512195,
      "eval_loss": 0.6821430325508118,
      "eval_runtime": 3.4768,
      "eval_samples_per_second": 58.962,
      "eval_steps_per_second": 2.013,
      "step": 290
    },
    {
      "epoch": 20.69,
      "learning_rate": 7.936507936507936e-06,
      "loss": 0.7231,
      "step": 300
    },
    {
      "epoch": 20.97,
      "eval_accuracy": 0.7024390243902439,
      "eval_loss": 0.6839069128036499,
      "eval_runtime": 3.53,
      "eval_samples_per_second": 58.074,
      "eval_steps_per_second": 1.983,
      "step": 304
    },
    {
      "epoch": 21.38,
      "learning_rate": 6.349206349206349e-06,
      "loss": 0.6962,
      "step": 310
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.6878048780487804,
      "eval_loss": 0.6958089470863342,
      "eval_runtime": 3.4679,
      "eval_samples_per_second": 59.113,
      "eval_steps_per_second": 2.018,
      "step": 319
    },
    {
      "epoch": 22.07,
      "learning_rate": 4.7619047619047615e-06,
      "loss": 0.6995,
      "step": 320
    },
    {
      "epoch": 22.76,
      "learning_rate": 3.1746031746031746e-06,
      "loss": 0.7079,
      "step": 330
    },
    {
      "epoch": 22.97,
      "eval_accuracy": 0.6878048780487804,
      "eval_loss": 0.7039469480514526,
      "eval_runtime": 3.4654,
      "eval_samples_per_second": 59.156,
      "eval_steps_per_second": 2.02,
      "step": 333
    },
    {
      "epoch": 23.45,
      "learning_rate": 1.5873015873015873e-06,
      "loss": 0.7088,
      "step": 340
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.6878048780487804,
      "eval_loss": 0.6974486112594604,
      "eval_runtime": 4.2005,
      "eval_samples_per_second": 48.803,
      "eval_steps_per_second": 1.666,
      "step": 348
    },
    {
      "epoch": 24.14,
      "learning_rate": 0.0,
      "loss": 0.7106,
      "step": 350
    },
    {
      "epoch": 24.14,
      "eval_accuracy": 0.6878048780487804,
      "eval_loss": 0.6975364089012146,
      "eval_runtime": 3.9957,
      "eval_samples_per_second": 51.305,
      "eval_steps_per_second": 1.752,
      "step": 350
    },
    {
      "epoch": 24.14,
      "step": 350,
      "total_flos": 1.444384721662378e+18,
      "train_loss": 0.8573012270246233,
      "train_runtime": 2334.0273,
      "train_samples_per_second": 19.698,
      "train_steps_per_second": 0.15
    }
  ],
  "logging_steps": 10,
  "max_steps": 350,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 25,
  "save_steps": 500,
  "total_flos": 1.444384721662378e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}