{
  "best_metric": 0.9879873394966125,
  "best_model_checkpoint": "Transformers/vit-base-clothing-leafs-example-full-simple_highres/checkpoint-20000",
  "epoch": 4.0,
  "global_step": 28540,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "learning_rate": 2.4124036440084092e-05,
      "loss": 2.0202,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "eval_accuracy": 0.6338309503784693,
      "eval_loss": 1.4968905448913574,
      "eval_runtime": 393.745,
      "eval_samples_per_second": 72.473,
      "eval_steps_per_second": 9.059,
      "step": 1000
    },
    {
      "epoch": 0.28,
      "learning_rate": 2.32489488437281e-05,
      "loss": 1.3694,
      "step": 2000
    },
    {
      "epoch": 0.28,
      "eval_accuracy": 0.6647392767031118,
      "eval_loss": 1.2786486148834229,
      "eval_runtime": 367.4489,
      "eval_samples_per_second": 77.66,
      "eval_steps_per_second": 9.707,
      "step": 2000
    },
    {
      "epoch": 0.42,
      "learning_rate": 2.237386124737211e-05,
      "loss": 1.2063,
      "step": 3000
    },
    {
      "epoch": 0.42,
      "eval_accuracy": 0.6793523969722456,
      "eval_loss": 1.1788005828857422,
      "eval_runtime": 365.4215,
      "eval_samples_per_second": 78.091,
      "eval_steps_per_second": 9.761,
      "step": 3000
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.1497897687456204e-05,
      "loss": 1.1544,
      "step": 4000
    },
    {
      "epoch": 0.56,
      "eval_accuracy": 0.6855550883095037,
      "eval_loss": 1.1320449113845825,
      "eval_runtime": 363.0958,
      "eval_samples_per_second": 78.591,
      "eval_steps_per_second": 9.824,
      "step": 4000
    },
    {
      "epoch": 0.7,
      "learning_rate": 2.0622810091100212e-05,
      "loss": 1.1089,
      "step": 5000
    },
    {
      "epoch": 0.7,
      "eval_accuracy": 0.6867465657415195,
      "eval_loss": 1.1020501852035522,
      "eval_runtime": 361.1213,
      "eval_samples_per_second": 79.021,
      "eval_steps_per_second": 9.878,
      "step": 5000
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.9746846531184303e-05,
      "loss": 1.0681,
      "step": 6000
    },
    {
      "epoch": 0.84,
      "eval_accuracy": 0.6935449957947856,
      "eval_loss": 1.0774946212768555,
      "eval_runtime": 364.542,
      "eval_samples_per_second": 78.279,
      "eval_steps_per_second": 9.785,
      "step": 6000
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.8870882971268398e-05,
      "loss": 1.0483,
      "step": 7000
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.7006237734791141,
      "eval_loss": 1.0460755825042725,
      "eval_runtime": 359.7119,
      "eval_samples_per_second": 79.33,
      "eval_steps_per_second": 9.916,
      "step": 7000
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.7995795374912406e-05,
      "loss": 0.9591,
      "step": 8000
    },
    {
      "epoch": 1.12,
      "eval_accuracy": 0.7022007289038408,
      "eval_loss": 1.0398491621017456,
      "eval_runtime": 363.8681,
      "eval_samples_per_second": 78.424,
      "eval_steps_per_second": 9.803,
      "step": 8000
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.7119831814996497e-05,
      "loss": 0.9541,
      "step": 9000
    },
    {
      "epoch": 1.26,
      "eval_accuracy": 0.6981356882534343,
      "eval_loss": 1.0423301458358765,
      "eval_runtime": 365.8278,
      "eval_samples_per_second": 78.004,
      "eval_steps_per_second": 9.75,
      "step": 9000
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.624386825508059e-05,
      "loss": 0.9382,
      "step": 10000
    },
    {
      "epoch": 1.4,
      "eval_accuracy": 0.7013947294645361,
      "eval_loss": 1.0322409868240356,
      "eval_runtime": 359.094,
      "eval_samples_per_second": 79.467,
      "eval_steps_per_second": 9.933,
      "step": 10000
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.5369656622284514e-05,
      "loss": 0.9363,
      "step": 11000
    },
    {
      "epoch": 1.54,
      "eval_accuracy": 0.7019554247266611,
      "eval_loss": 1.0300703048706055,
      "eval_runtime": 396.2796,
      "eval_samples_per_second": 72.01,
      "eval_steps_per_second": 9.001,
      "step": 11000
    },
    {
      "epoch": 1.68,
      "learning_rate": 1.4493693062368607e-05,
      "loss": 0.9199,
      "step": 12000
    },
    {
      "epoch": 1.68,
      "eval_accuracy": 0.7105761143818334,
      "eval_loss": 1.0079022645950317,
      "eval_runtime": 360.6707,
      "eval_samples_per_second": 79.119,
      "eval_steps_per_second": 9.89,
      "step": 12000
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.3617729502452698e-05,
      "loss": 0.919,
      "step": 13000
    },
    {
      "epoch": 1.82,
      "eval_accuracy": 0.7119778525371461,
      "eval_loss": 0.9972480535507202,
      "eval_runtime": 361.6963,
      "eval_samples_per_second": 78.895,
      "eval_steps_per_second": 9.862,
      "step": 13000
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.2741765942536791e-05,
      "loss": 0.9203,
      "step": 14000
    },
    {
      "epoch": 1.96,
      "eval_accuracy": 0.7095948976731147,
      "eval_loss": 1.0011298656463623,
      "eval_runtime": 358.1777,
      "eval_samples_per_second": 79.67,
      "eval_steps_per_second": 9.959,
      "step": 14000
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.1865802382620882e-05,
      "loss": 0.8377,
      "step": 15000
    },
    {
      "epoch": 2.1,
      "eval_accuracy": 0.7146061115783572,
      "eval_loss": 0.9911500811576843,
      "eval_runtime": 423.0343,
      "eval_samples_per_second": 67.456,
      "eval_steps_per_second": 8.432,
      "step": 15000
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.0991590749824808e-05,
      "loss": 0.8148,
      "step": 16000
    },
    {
      "epoch": 2.24,
      "eval_accuracy": 0.7121180263526773,
      "eval_loss": 0.9991061091423035,
      "eval_runtime": 345.3108,
      "eval_samples_per_second": 82.639,
      "eval_steps_per_second": 10.33,
      "step": 16000
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.01156271899089e-05,
      "loss": 0.8153,
      "step": 17000
    },
    {
      "epoch": 2.38,
      "eval_accuracy": 0.7101906363891225,
      "eval_loss": 1.007048487663269,
      "eval_runtime": 342.0626,
      "eval_samples_per_second": 83.423,
      "eval_steps_per_second": 10.428,
      "step": 17000
    },
    {
      "epoch": 2.52,
      "learning_rate": 9.239663629992992e-06,
      "loss": 0.8004,
      "step": 18000
    },
    {
      "epoch": 2.52,
      "eval_accuracy": 0.7154121110176619,
      "eval_loss": 0.9979108572006226,
      "eval_runtime": 365.4895,
      "eval_samples_per_second": 78.076,
      "eval_steps_per_second": 9.76,
      "step": 18000
    },
    {
      "epoch": 2.66,
      "learning_rate": 8.363700070077085e-06,
      "loss": 0.7937,
      "step": 19000
    },
    {
      "epoch": 2.66,
      "eval_accuracy": 0.7135548079618728,
      "eval_loss": 1.0022467374801636,
      "eval_runtime": 497.9936,
      "eval_samples_per_second": 57.302,
      "eval_steps_per_second": 7.163,
      "step": 19000
    },
    {
      "epoch": 2.8,
      "learning_rate": 7.487736510161178e-06,
      "loss": 0.7989,
      "step": 20000
    },
    {
      "epoch": 2.8,
      "eval_accuracy": 0.7166386319035604,
      "eval_loss": 0.9879873394966125,
      "eval_runtime": 369.3274,
      "eval_samples_per_second": 77.265,
      "eval_steps_per_second": 9.658,
      "step": 20000
    },
    {
      "epoch": 2.94,
      "learning_rate": 6.612648913805186e-06,
      "loss": 0.7953,
      "step": 21000
    },
    {
      "epoch": 2.94,
      "eval_accuracy": 0.7175497617045136,
      "eval_loss": 0.990685760974884,
      "eval_runtime": 375.6866,
      "eval_samples_per_second": 75.957,
      "eval_steps_per_second": 9.495,
      "step": 21000
    },
    {
      "epoch": 3.08,
      "learning_rate": 5.736685353889278e-06,
      "loss": 0.7576,
      "step": 22000
    },
    {
      "epoch": 3.08,
      "eval_accuracy": 0.7136248948696383,
      "eval_loss": 1.0013048648834229,
      "eval_runtime": 367.0966,
      "eval_samples_per_second": 77.734,
      "eval_steps_per_second": 9.717,
      "step": 22000
    },
    {
      "epoch": 3.22,
      "learning_rate": 4.861597757533287e-06,
      "loss": 0.7018,
      "step": 23000
    },
    {
      "epoch": 3.22,
      "eval_accuracy": 0.7155873282870759,
      "eval_loss": 1.0022220611572266,
      "eval_runtime": 367.5473,
      "eval_samples_per_second": 77.639,
      "eval_steps_per_second": 9.705,
      "step": 23000
    },
    {
      "epoch": 3.36,
      "learning_rate": 3.986510161177296e-06,
      "loss": 0.7127,
      "step": 24000
    },
    {
      "epoch": 3.36,
      "eval_accuracy": 0.7150616764788338,
      "eval_loss": 1.0079938173294067,
      "eval_runtime": 377.1194,
      "eval_samples_per_second": 75.668,
      "eval_steps_per_second": 9.459,
      "step": 24000
    },
    {
      "epoch": 3.5,
      "learning_rate": 3.1105466012613877e-06,
      "loss": 0.6989,
      "step": 25000
    },
    {
      "epoch": 3.5,
      "eval_accuracy": 0.7159377628259042,
      "eval_loss": 1.0025221109390259,
      "eval_runtime": 358.2569,
      "eval_samples_per_second": 79.652,
      "eval_steps_per_second": 9.957,
      "step": 25000
    },
    {
      "epoch": 3.64,
      "learning_rate": 2.23458304134548e-06,
      "loss": 0.702,
      "step": 26000
    },
    {
      "epoch": 3.64,
      "eval_accuracy": 0.7167437622652089,
      "eval_loss": 1.008726954460144,
      "eval_runtime": 368.5339,
      "eval_samples_per_second": 77.431,
      "eval_steps_per_second": 9.679,
      "step": 26000
    },
    {
      "epoch": 3.78,
      "learning_rate": 1.3594954449894885e-06,
      "loss": 0.7122,
      "step": 27000
    },
    {
      "epoch": 3.78,
      "eval_accuracy": 0.7158676759181385,
      "eval_loss": 1.0042390823364258,
      "eval_runtime": 366.8278,
      "eval_samples_per_second": 77.791,
      "eval_steps_per_second": 9.724,
      "step": 27000
    },
    {
      "epoch": 3.92,
      "learning_rate": 4.83531885073581e-07,
      "loss": 0.6986,
      "step": 28000
    },
    {
      "epoch": 3.92,
      "eval_accuracy": 0.7163582842724979,
      "eval_loss": 1.0016725063323975,
      "eval_runtime": 363.5122,
      "eval_samples_per_second": 78.501,
      "eval_steps_per_second": 9.813,
      "step": 28000
    },
    {
      "epoch": 4.0,
      "step": 28540,
      "total_flos": 7.0790801124097475e+19,
      "train_loss": 0.9299831045534365,
      "train_runtime": 25938.6639,
      "train_samples_per_second": 35.205,
      "train_steps_per_second": 1.1
    }
  ],
  "max_steps": 28540,
  "num_train_epochs": 4,
  "total_flos": 7.0790801124097475e+19,
  "trial_name": null,
  "trial_params": null
}