{
  "best_metric": 0.4106145251396648,
  "best_model_checkpoint": "swinv2-base-patch4-window8-256-for-pre_evaluation/checkpoint-320",
  "epoch": 30.0,
  "eval_steps": 500,
  "global_step": 480,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.62,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 1.6064,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.30726256983240224,
      "eval_loss": 1.518856406211853,
      "eval_runtime": 7.5492,
      "eval_samples_per_second": 47.423,
      "eval_steps_per_second": 1.59,
      "step": 16
    },
    {
      "epoch": 1.25,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 1.5218,
      "step": 20
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.125e-05,
      "loss": 1.5058,
      "step": 30
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.30726256983240224,
      "eval_loss": 1.505635380744934,
      "eval_runtime": 8.0578,
      "eval_samples_per_second": 44.429,
      "eval_steps_per_second": 1.489,
      "step": 32
    },
    {
      "epoch": 2.5,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.5176,
      "step": 40
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.29608938547486036,
      "eval_loss": 1.5175888538360596,
      "eval_runtime": 7.6326,
      "eval_samples_per_second": 46.904,
      "eval_steps_per_second": 1.572,
      "step": 48
    },
    {
      "epoch": 3.12,
      "learning_rate": 4.976851851851852e-05,
      "loss": 1.4809,
      "step": 50
    },
    {
      "epoch": 3.75,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 1.4883,
      "step": 60
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.30726256983240224,
      "eval_loss": 1.5129534006118774,
      "eval_runtime": 7.9777,
      "eval_samples_per_second": 44.875,
      "eval_steps_per_second": 1.504,
      "step": 64
    },
    {
      "epoch": 4.38,
      "learning_rate": 4.745370370370371e-05,
      "loss": 1.4625,
      "step": 70
    },
    {
      "epoch": 5.0,
      "learning_rate": 4.62962962962963e-05,
      "loss": 1.4446,
      "step": 80
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.329608938547486,
      "eval_loss": 1.4540027379989624,
      "eval_runtime": 8.0262,
      "eval_samples_per_second": 44.604,
      "eval_steps_per_second": 1.495,
      "step": 80
    },
    {
      "epoch": 5.62,
      "learning_rate": 4.5138888888888894e-05,
      "loss": 1.4568,
      "step": 90
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.31564245810055863,
      "eval_loss": 1.5154197216033936,
      "eval_runtime": 7.416,
      "eval_samples_per_second": 48.274,
      "eval_steps_per_second": 1.618,
      "step": 96
    },
    {
      "epoch": 6.25,
      "learning_rate": 4.3981481481481486e-05,
      "loss": 1.4427,
      "step": 100
    },
    {
      "epoch": 6.88,
      "learning_rate": 4.282407407407408e-05,
      "loss": 1.4106,
      "step": 110
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.388268156424581,
      "eval_loss": 1.4271849393844604,
      "eval_runtime": 7.9635,
      "eval_samples_per_second": 44.955,
      "eval_steps_per_second": 1.507,
      "step": 112
    },
    {
      "epoch": 7.5,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.3804,
      "step": 120
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.3743016759776536,
      "eval_loss": 1.4184668064117432,
      "eval_runtime": 7.541,
      "eval_samples_per_second": 47.474,
      "eval_steps_per_second": 1.591,
      "step": 128
    },
    {
      "epoch": 8.12,
      "learning_rate": 4.0509259259259265e-05,
      "loss": 1.3723,
      "step": 130
    },
    {
      "epoch": 8.75,
      "learning_rate": 3.935185185185186e-05,
      "loss": 1.3725,
      "step": 140
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.39106145251396646,
      "eval_loss": 1.3943482637405396,
      "eval_runtime": 7.6226,
      "eval_samples_per_second": 46.965,
      "eval_steps_per_second": 1.574,
      "step": 144
    },
    {
      "epoch": 9.38,
      "learning_rate": 3.8194444444444444e-05,
      "loss": 1.3603,
      "step": 150
    },
    {
      "epoch": 10.0,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 1.3441,
      "step": 160
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.4022346368715084,
      "eval_loss": 1.4510265588760376,
      "eval_runtime": 7.9194,
      "eval_samples_per_second": 45.206,
      "eval_steps_per_second": 1.515,
      "step": 160
    },
    {
      "epoch": 10.62,
      "learning_rate": 3.587962962962963e-05,
      "loss": 1.3335,
      "step": 170
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.38268156424581007,
      "eval_loss": 1.4336817264556885,
      "eval_runtime": 8.1148,
      "eval_samples_per_second": 44.117,
      "eval_steps_per_second": 1.479,
      "step": 176
    },
    {
      "epoch": 11.25,
      "learning_rate": 3.472222222222222e-05,
      "loss": 1.3375,
      "step": 180
    },
    {
      "epoch": 11.88,
      "learning_rate": 3.3564814814814815e-05,
      "loss": 1.3055,
      "step": 190
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.3854748603351955,
      "eval_loss": 1.46334969997406,
      "eval_runtime": 8.0804,
      "eval_samples_per_second": 44.305,
      "eval_steps_per_second": 1.485,
      "step": 192
    },
    {
      "epoch": 12.5,
      "learning_rate": 3.240740740740741e-05,
      "loss": 1.3303,
      "step": 200
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.388268156424581,
      "eval_loss": 1.4673736095428467,
      "eval_runtime": 7.7078,
      "eval_samples_per_second": 46.446,
      "eval_steps_per_second": 1.557,
      "step": 208
    },
    {
      "epoch": 13.12,
      "learning_rate": 3.125e-05,
      "loss": 1.275,
      "step": 210
    },
    {
      "epoch": 13.75,
      "learning_rate": 3.0092592592592593e-05,
      "loss": 1.2882,
      "step": 220
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.39106145251396646,
      "eval_loss": 1.4388198852539062,
      "eval_runtime": 7.5454,
      "eval_samples_per_second": 47.446,
      "eval_steps_per_second": 1.59,
      "step": 224
    },
    {
      "epoch": 14.38,
      "learning_rate": 2.8935185185185186e-05,
      "loss": 1.2785,
      "step": 230
    },
    {
      "epoch": 15.0,
      "learning_rate": 2.777777777777778e-05,
      "loss": 1.2362,
      "step": 240
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.3854748603351955,
      "eval_loss": 1.4675506353378296,
      "eval_runtime": 7.405,
      "eval_samples_per_second": 48.346,
      "eval_steps_per_second": 1.621,
      "step": 240
    },
    {
      "epoch": 15.62,
      "learning_rate": 2.6620370370370372e-05,
      "loss": 1.2572,
      "step": 250
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.37988826815642457,
      "eval_loss": 1.4805017709732056,
      "eval_runtime": 7.6595,
      "eval_samples_per_second": 46.74,
      "eval_steps_per_second": 1.567,
      "step": 256
    },
    {
      "epoch": 16.25,
      "learning_rate": 2.5462962962962965e-05,
      "loss": 1.2553,
      "step": 260
    },
    {
      "epoch": 16.88,
      "learning_rate": 2.4305555555555558e-05,
      "loss": 1.2164,
      "step": 270
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.39385474860335196,
      "eval_loss": 1.4717062711715698,
      "eval_runtime": 7.4797,
      "eval_samples_per_second": 47.863,
      "eval_steps_per_second": 1.604,
      "step": 272
    },
    {
      "epoch": 17.5,
      "learning_rate": 2.314814814814815e-05,
      "loss": 1.221,
      "step": 280
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.40782122905027934,
      "eval_loss": 1.4354026317596436,
      "eval_runtime": 7.9665,
      "eval_samples_per_second": 44.938,
      "eval_steps_per_second": 1.506,
      "step": 288
    },
    {
      "epoch": 18.12,
      "learning_rate": 2.1990740740740743e-05,
      "loss": 1.1765,
      "step": 290
    },
    {
      "epoch": 18.75,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 1.1713,
      "step": 300
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.40782122905027934,
      "eval_loss": 1.4835941791534424,
      "eval_runtime": 7.9097,
      "eval_samples_per_second": 45.261,
      "eval_steps_per_second": 1.517,
      "step": 304
    },
    {
      "epoch": 19.38,
      "learning_rate": 1.967592592592593e-05,
      "loss": 1.1669,
      "step": 310
    },
    {
      "epoch": 20.0,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 1.18,
      "step": 320
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.4106145251396648,
      "eval_loss": 1.487263798713684,
      "eval_runtime": 7.8923,
      "eval_samples_per_second": 45.361,
      "eval_steps_per_second": 1.52,
      "step": 320
    },
    {
      "epoch": 20.62,
      "learning_rate": 1.736111111111111e-05,
      "loss": 1.1349,
      "step": 330
    },
    {
      "epoch": 21.0,
      "eval_accuracy": 0.3854748603351955,
      "eval_loss": 1.4852601289749146,
      "eval_runtime": 8.006,
      "eval_samples_per_second": 44.716,
      "eval_steps_per_second": 1.499,
      "step": 336
    },
    {
      "epoch": 21.25,
      "learning_rate": 1.6203703703703704e-05,
      "loss": 1.1504,
      "step": 340
    },
    {
      "epoch": 21.88,
      "learning_rate": 1.5046296296296297e-05,
      "loss": 1.1138,
      "step": 350
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.39664804469273746,
      "eval_loss": 1.4926708936691284,
      "eval_runtime": 7.9632,
      "eval_samples_per_second": 44.957,
      "eval_steps_per_second": 1.507,
      "step": 352
    },
    {
      "epoch": 22.5,
      "learning_rate": 1.388888888888889e-05,
      "loss": 1.1402,
      "step": 360
    },
    {
      "epoch": 23.0,
      "eval_accuracy": 0.3994413407821229,
      "eval_loss": 1.4671615362167358,
      "eval_runtime": 7.9053,
      "eval_samples_per_second": 45.286,
      "eval_steps_per_second": 1.518,
      "step": 368
    },
    {
      "epoch": 23.12,
      "learning_rate": 1.2731481481481482e-05,
      "loss": 1.1024,
      "step": 370
    },
    {
      "epoch": 23.75,
      "learning_rate": 1.1574074074074075e-05,
      "loss": 1.1183,
      "step": 380
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.4022346368715084,
      "eval_loss": 1.5032563209533691,
      "eval_runtime": 7.4671,
      "eval_samples_per_second": 47.944,
      "eval_steps_per_second": 1.607,
      "step": 384
    },
    {
      "epoch": 24.38,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 1.0849,
      "step": 390
    },
    {
      "epoch": 25.0,
      "learning_rate": 9.259259259259259e-06,
      "loss": 1.0834,
      "step": 400
    },
    {
      "epoch": 25.0,
      "eval_accuracy": 0.3854748603351955,
      "eval_loss": 1.544798731803894,
      "eval_runtime": 7.7993,
      "eval_samples_per_second": 45.902,
      "eval_steps_per_second": 1.539,
      "step": 400
    },
    {
      "epoch": 25.62,
      "learning_rate": 8.101851851851852e-06,
      "loss": 1.0515,
      "step": 410
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.39385474860335196,
      "eval_loss": 1.5130928754806519,
      "eval_runtime": 8.0341,
      "eval_samples_per_second": 44.56,
      "eval_steps_per_second": 1.494,
      "step": 416
    },
    {
      "epoch": 26.25,
      "learning_rate": 6.944444444444445e-06,
      "loss": 1.0949,
      "step": 420
    },
    {
      "epoch": 26.88,
      "learning_rate": 5.787037037037038e-06,
      "loss": 1.0745,
      "step": 430
    },
    {
      "epoch": 27.0,
      "eval_accuracy": 0.38268156424581007,
      "eval_loss": 1.5313720703125,
      "eval_runtime": 7.7853,
      "eval_samples_per_second": 45.984,
      "eval_steps_per_second": 1.541,
      "step": 432
    },
    {
      "epoch": 27.5,
      "learning_rate": 4.6296296296296296e-06,
      "loss": 1.0332,
      "step": 440
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.39385474860335196,
      "eval_loss": 1.54736328125,
      "eval_runtime": 7.8218,
      "eval_samples_per_second": 45.77,
      "eval_steps_per_second": 1.534,
      "step": 448
    },
    {
      "epoch": 28.12,
      "learning_rate": 3.4722222222222224e-06,
      "loss": 1.0432,
      "step": 450
    },
    {
      "epoch": 28.75,
      "learning_rate": 2.3148148148148148e-06,
      "loss": 1.0679,
      "step": 460
    },
    {
      "epoch": 29.0,
      "eval_accuracy": 0.3854748603351955,
      "eval_loss": 1.5327454805374146,
      "eval_runtime": 7.4537,
      "eval_samples_per_second": 48.03,
      "eval_steps_per_second": 1.61,
      "step": 464
    },
    {
      "epoch": 29.38,
      "learning_rate": 1.1574074074074074e-06,
      "loss": 1.0459,
      "step": 470
    },
    {
      "epoch": 30.0,
      "learning_rate": 0.0,
      "loss": 1.0295,
      "step": 480
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.3854748603351955,
      "eval_loss": 1.540163516998291,
      "eval_runtime": 7.7331,
      "eval_samples_per_second": 46.294,
      "eval_steps_per_second": 1.552,
      "step": 480
    },
    {
      "epoch": 30.0,
      "step": 480,
      "total_flos": 6.264395996933652e+18,
      "train_loss": 1.2617869913578033,
      "train_runtime": 3670.4318,
      "train_samples_per_second": 16.649,
      "train_steps_per_second": 0.131
    }
  ],
  "logging_steps": 10,
  "max_steps": 480,
  "num_train_epochs": 30,
  "save_steps": 500,
  "total_flos": 6.264395996933652e+18,
  "trial_name": null,
  "trial_params": null
}