{
  "best_metric": 0.8810105949470253,
  "best_model_checkpoint": "/kaggle/working/danbooru-effnet/checkpoint-776",
  "epoch": 9.971014492753623,
  "eval_steps": 500,
  "global_step": 860,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12,
      "learning_rate": 5.8139534883720935e-06,
      "loss": 7.541,
      "step": 10
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.1627906976744187e-05,
      "loss": 7.3979,
      "step": 20
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.744186046511628e-05,
      "loss": 7.1218,
      "step": 30
    },
    {
      "epoch": 0.46,
      "learning_rate": 2.3255813953488374e-05,
      "loss": 6.7652,
      "step": 40
    },
    {
      "epoch": 0.58,
      "learning_rate": 2.9069767441860467e-05,
      "loss": 6.1784,
      "step": 50
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.488372093023256e-05,
      "loss": 5.3812,
      "step": 60
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.0697674418604655e-05,
      "loss": 4.271,
      "step": 70
    },
    {
      "epoch": 0.93,
      "learning_rate": 4.651162790697675e-05,
      "loss": 2.9823,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7734311328443357,
      "eval_loss": 1.3710215091705322,
      "eval_runtime": 24.4306,
      "eval_samples_per_second": 50.224,
      "eval_steps_per_second": 1.596,
      "step": 86
    },
    {
      "epoch": 1.04,
      "learning_rate": 4.9741602067183466e-05,
      "loss": 1.7243,
      "step": 90
    },
    {
      "epoch": 1.16,
      "learning_rate": 4.9095607235142123e-05,
      "loss": 0.9988,
      "step": 100
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.8449612403100775e-05,
      "loss": 0.6847,
      "step": 110
    },
    {
      "epoch": 1.39,
      "learning_rate": 4.780361757105943e-05,
      "loss": 0.6244,
      "step": 120
    },
    {
      "epoch": 1.51,
      "learning_rate": 4.715762273901809e-05,
      "loss": 0.5514,
      "step": 130
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.651162790697675e-05,
      "loss": 0.5363,
      "step": 140
    },
    {
      "epoch": 1.74,
      "learning_rate": 4.5865633074935406e-05,
      "loss": 0.5036,
      "step": 150
    },
    {
      "epoch": 1.86,
      "learning_rate": 4.521963824289406e-05,
      "loss": 0.5194,
      "step": 160
    },
    {
      "epoch": 1.97,
      "learning_rate": 4.4573643410852715e-05,
      "loss": 0.4826,
      "step": 170
    },
    {
      "epoch": 1.99,
      "eval_accuracy": 0.8378158109209454,
      "eval_loss": 0.4534945785999298,
      "eval_runtime": 13.0595,
      "eval_samples_per_second": 93.955,
      "eval_steps_per_second": 2.986,
      "step": 172
    },
    {
      "epoch": 2.09,
      "learning_rate": 4.392764857881137e-05,
      "loss": 0.4388,
      "step": 180
    },
    {
      "epoch": 2.2,
      "learning_rate": 4.328165374677003e-05,
      "loss": 0.4391,
      "step": 190
    },
    {
      "epoch": 2.32,
      "learning_rate": 4.263565891472868e-05,
      "loss": 0.44,
      "step": 200
    },
    {
      "epoch": 2.43,
      "learning_rate": 4.198966408268734e-05,
      "loss": 0.4375,
      "step": 210
    },
    {
      "epoch": 2.55,
      "learning_rate": 4.1343669250646e-05,
      "loss": 0.4578,
      "step": 220
    },
    {
      "epoch": 2.67,
      "learning_rate": 4.0697674418604655e-05,
      "loss": 0.4513,
      "step": 230
    },
    {
      "epoch": 2.78,
      "learning_rate": 4.005167958656331e-05,
      "loss": 0.4169,
      "step": 240
    },
    {
      "epoch": 2.9,
      "learning_rate": 3.9405684754521964e-05,
      "loss": 0.4562,
      "step": 250
    },
    {
      "epoch": 2.99,
      "eval_accuracy": 0.8524857375713122,
      "eval_loss": 0.38659510016441345,
      "eval_runtime": 13.3081,
      "eval_samples_per_second": 92.199,
      "eval_steps_per_second": 2.931,
      "step": 258
    },
    {
      "epoch": 3.01,
      "learning_rate": 3.875968992248062e-05,
      "loss": 0.4149,
      "step": 260
    },
    {
      "epoch": 3.13,
      "learning_rate": 3.811369509043928e-05,
      "loss": 0.4,
      "step": 270
    },
    {
      "epoch": 3.25,
      "learning_rate": 3.746770025839794e-05,
      "loss": 0.4126,
      "step": 280
    },
    {
      "epoch": 3.36,
      "learning_rate": 3.682170542635659e-05,
      "loss": 0.415,
      "step": 290
    },
    {
      "epoch": 3.48,
      "learning_rate": 3.617571059431525e-05,
      "loss": 0.4347,
      "step": 300
    },
    {
      "epoch": 3.59,
      "learning_rate": 3.5529715762273905e-05,
      "loss": 0.4009,
      "step": 310
    },
    {
      "epoch": 3.71,
      "learning_rate": 3.488372093023256e-05,
      "loss": 0.4073,
      "step": 320
    },
    {
      "epoch": 3.83,
      "learning_rate": 3.4237726098191214e-05,
      "loss": 0.3998,
      "step": 330
    },
    {
      "epoch": 3.94,
      "learning_rate": 3.359173126614987e-05,
      "loss": 0.4149,
      "step": 340
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8533007334963325,
      "eval_loss": 0.3689855635166168,
      "eval_runtime": 13.1979,
      "eval_samples_per_second": 92.969,
      "eval_steps_per_second": 2.955,
      "step": 345
    },
    {
      "epoch": 4.06,
      "learning_rate": 3.294573643410852e-05,
      "loss": 0.3678,
      "step": 350
    },
    {
      "epoch": 4.17,
      "learning_rate": 3.229974160206719e-05,
      "loss": 0.3866,
      "step": 360
    },
    {
      "epoch": 4.29,
      "learning_rate": 3.1653746770025845e-05,
      "loss": 0.3978,
      "step": 370
    },
    {
      "epoch": 4.41,
      "learning_rate": 3.1007751937984497e-05,
      "loss": 0.3898,
      "step": 380
    },
    {
      "epoch": 4.52,
      "learning_rate": 3.0361757105943154e-05,
      "loss": 0.3866,
      "step": 390
    },
    {
      "epoch": 4.64,
      "learning_rate": 2.971576227390181e-05,
      "loss": 0.376,
      "step": 400
    },
    {
      "epoch": 4.75,
      "learning_rate": 2.9069767441860467e-05,
      "loss": 0.3798,
      "step": 410
    },
    {
      "epoch": 4.87,
      "learning_rate": 2.842377260981912e-05,
      "loss": 0.411,
      "step": 420
    },
    {
      "epoch": 4.99,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.3943,
      "step": 430
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8687856560717196,
      "eval_loss": 0.34427791833877563,
      "eval_runtime": 13.0107,
      "eval_samples_per_second": 94.307,
      "eval_steps_per_second": 2.998,
      "step": 431
    },
    {
      "epoch": 5.1,
      "learning_rate": 2.7131782945736434e-05,
      "loss": 0.3535,
      "step": 440
    },
    {
      "epoch": 5.22,
      "learning_rate": 2.648578811369509e-05,
      "loss": 0.3842,
      "step": 450
    },
    {
      "epoch": 5.33,
      "learning_rate": 2.5839793281653746e-05,
      "loss": 0.353,
      "step": 460
    },
    {
      "epoch": 5.45,
      "learning_rate": 2.5193798449612404e-05,
      "loss": 0.3797,
      "step": 470
    },
    {
      "epoch": 5.57,
      "learning_rate": 2.4547803617571062e-05,
      "loss": 0.378,
      "step": 480
    },
    {
      "epoch": 5.68,
      "learning_rate": 2.3901808785529716e-05,
      "loss": 0.371,
      "step": 490
    },
    {
      "epoch": 5.8,
      "learning_rate": 2.3255813953488374e-05,
      "loss": 0.3416,
      "step": 500
    },
    {
      "epoch": 5.91,
      "learning_rate": 2.260981912144703e-05,
      "loss": 0.394,
      "step": 510
    },
    {
      "epoch": 5.99,
      "eval_accuracy": 0.8793806030969845,
      "eval_loss": 0.33624228835105896,
      "eval_runtime": 13.2837,
      "eval_samples_per_second": 92.369,
      "eval_steps_per_second": 2.936,
      "step": 517
    },
    {
      "epoch": 6.03,
      "learning_rate": 2.1963824289405686e-05,
      "loss": 0.3905,
      "step": 520
    },
    {
      "epoch": 6.14,
      "learning_rate": 2.131782945736434e-05,
      "loss": 0.3574,
      "step": 530
    },
    {
      "epoch": 6.26,
      "learning_rate": 2.0671834625323e-05,
      "loss": 0.3718,
      "step": 540
    },
    {
      "epoch": 6.38,
      "learning_rate": 2.0025839793281657e-05,
      "loss": 0.3361,
      "step": 550
    },
    {
      "epoch": 6.49,
      "learning_rate": 1.937984496124031e-05,
      "loss": 0.3186,
      "step": 560
    },
    {
      "epoch": 6.61,
      "learning_rate": 1.873385012919897e-05,
      "loss": 0.3667,
      "step": 570
    },
    {
      "epoch": 6.72,
      "learning_rate": 1.8087855297157624e-05,
      "loss": 0.36,
      "step": 580
    },
    {
      "epoch": 6.84,
      "learning_rate": 1.744186046511628e-05,
      "loss": 0.3945,
      "step": 590
    },
    {
      "epoch": 6.96,
      "learning_rate": 1.6795865633074936e-05,
      "loss": 0.3416,
      "step": 600
    },
    {
      "epoch": 6.99,
      "eval_accuracy": 0.8712306438467807,
      "eval_loss": 0.33915451169013977,
      "eval_runtime": 13.2457,
      "eval_samples_per_second": 92.634,
      "eval_steps_per_second": 2.944,
      "step": 603
    },
    {
      "epoch": 7.07,
      "learning_rate": 1.6149870801033594e-05,
      "loss": 0.3618,
      "step": 610
    },
    {
      "epoch": 7.19,
      "learning_rate": 1.5503875968992248e-05,
      "loss": 0.3628,
      "step": 620
    },
    {
      "epoch": 7.3,
      "learning_rate": 1.4857881136950904e-05,
      "loss": 0.3514,
      "step": 630
    },
    {
      "epoch": 7.42,
      "learning_rate": 1.421188630490956e-05,
      "loss": 0.3777,
      "step": 640
    },
    {
      "epoch": 7.54,
      "learning_rate": 1.3565891472868217e-05,
      "loss": 0.3799,
      "step": 650
    },
    {
      "epoch": 7.65,
      "learning_rate": 1.2919896640826873e-05,
      "loss": 0.3875,
      "step": 660
    },
    {
      "epoch": 7.77,
      "learning_rate": 1.2273901808785531e-05,
      "loss": 0.343,
      "step": 670
    },
    {
      "epoch": 7.88,
      "learning_rate": 1.1627906976744187e-05,
      "loss": 0.3477,
      "step": 680
    },
    {
      "epoch": 8.0,
      "learning_rate": 1.0981912144702843e-05,
      "loss": 0.3644,
      "step": 690
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8753056234718827,
      "eval_loss": 0.33345112204551697,
      "eval_runtime": 13.3299,
      "eval_samples_per_second": 92.048,
      "eval_steps_per_second": 2.926,
      "step": 690
    },
    {
      "epoch": 8.12,
      "learning_rate": 1.03359173126615e-05,
      "loss": 0.3701,
      "step": 700
    },
    {
      "epoch": 8.23,
      "learning_rate": 9.689922480620156e-06,
      "loss": 0.3491,
      "step": 710
    },
    {
      "epoch": 8.35,
      "learning_rate": 9.043927648578812e-06,
      "loss": 0.3414,
      "step": 720
    },
    {
      "epoch": 8.46,
      "learning_rate": 8.397932816537468e-06,
      "loss": 0.3403,
      "step": 730
    },
    {
      "epoch": 8.58,
      "learning_rate": 7.751937984496124e-06,
      "loss": 0.3305,
      "step": 740
    },
    {
      "epoch": 8.7,
      "learning_rate": 7.10594315245478e-06,
      "loss": 0.3412,
      "step": 750
    },
    {
      "epoch": 8.81,
      "learning_rate": 6.4599483204134365e-06,
      "loss": 0.3633,
      "step": 760
    },
    {
      "epoch": 8.93,
      "learning_rate": 5.8139534883720935e-06,
      "loss": 0.3581,
      "step": 770
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8810105949470253,
      "eval_loss": 0.32285985350608826,
      "eval_runtime": 13.3317,
      "eval_samples_per_second": 92.036,
      "eval_steps_per_second": 2.925,
      "step": 776
    },
    {
      "epoch": 9.04,
      "learning_rate": 5.16795865633075e-06,
      "loss": 0.2977,
      "step": 780
    },
    {
      "epoch": 9.16,
      "learning_rate": 4.521963824289406e-06,
      "loss": 0.3475,
      "step": 790
    },
    {
      "epoch": 9.28,
      "learning_rate": 3.875968992248062e-06,
      "loss": 0.3262,
      "step": 800
    },
    {
      "epoch": 9.39,
      "learning_rate": 3.2299741602067182e-06,
      "loss": 0.3532,
      "step": 810
    },
    {
      "epoch": 9.51,
      "learning_rate": 2.583979328165375e-06,
      "loss": 0.353,
      "step": 820
    },
    {
      "epoch": 9.62,
      "learning_rate": 1.937984496124031e-06,
      "loss": 0.3606,
      "step": 830
    },
    {
      "epoch": 9.74,
      "learning_rate": 1.2919896640826874e-06,
      "loss": 0.3519,
      "step": 840
    },
    {
      "epoch": 9.86,
      "learning_rate": 6.459948320413437e-07,
      "loss": 0.3531,
      "step": 850
    },
    {
      "epoch": 9.97,
      "learning_rate": 0.0,
      "loss": 0.3414,
      "step": 860
    },
    {
      "epoch": 9.97,
      "eval_accuracy": 0.8744906275468622,
      "eval_loss": 0.3368578255176544,
      "eval_runtime": 14.4016,
      "eval_samples_per_second": 85.199,
      "eval_steps_per_second": 2.708,
      "step": 860
    },
    {
      "epoch": 9.97,
      "step": 860,
      "total_flos": 2.1815473692672e+18,
      "train_loss": 0.9341721257498098,
      "train_runtime": 2440.787,
      "train_samples_per_second": 45.231,
      "train_steps_per_second": 0.352
    }
  ],
  "logging_steps": 10,
  "max_steps": 860,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 2.1815473692672e+18,
  "trial_name": null,
  "trial_params": null
}