{
  "best_metric": 0.6521739130434783,
  "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-DMAE-U2\\checkpoint-63",
  "epoch": 34.285714285714285,
  "eval_steps": 500,
  "global_step": 120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.86,
      "eval_accuracy": 0.4782608695652174,
      "eval_loss": 1.384318470954895,
      "eval_runtime": 0.7692,
      "eval_samples_per_second": 59.802,
      "eval_steps_per_second": 3.9,
      "step": 3
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.3617327213287354,
      "eval_runtime": 0.7541,
      "eval_samples_per_second": 61.001,
      "eval_steps_per_second": 3.978,
      "step": 7
    },
    {
      "epoch": 2.86,
      "learning_rate": 3.37719298245614e-05,
      "loss": 1.3721,
      "step": 10
    },
    {
      "epoch": 2.86,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.3210632801055908,
      "eval_runtime": 0.7782,
      "eval_samples_per_second": 59.112,
      "eval_steps_per_second": 3.855,
      "step": 10
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.2598481178283691,
      "eval_runtime": 0.7401,
      "eval_samples_per_second": 62.152,
      "eval_steps_per_second": 4.053,
      "step": 14
    },
    {
      "epoch": 4.86,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.2250741720199585,
      "eval_runtime": 0.8846,
      "eval_samples_per_second": 51.999,
      "eval_steps_per_second": 3.391,
      "step": 17
    },
    {
      "epoch": 5.71,
      "learning_rate": 3.070175438596491e-05,
      "loss": 1.25,
      "step": 20
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.209437608718872,
      "eval_runtime": 0.7856,
      "eval_samples_per_second": 58.558,
      "eval_steps_per_second": 3.819,
      "step": 21
    },
    {
      "epoch": 6.86,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.2126904726028442,
      "eval_runtime": 0.7803,
      "eval_samples_per_second": 58.952,
      "eval_steps_per_second": 3.845,
      "step": 24
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.2118207216262817,
      "eval_runtime": 0.7538,
      "eval_samples_per_second": 61.026,
      "eval_steps_per_second": 3.98,
      "step": 28
    },
    {
      "epoch": 8.57,
      "learning_rate": 2.763157894736842e-05,
      "loss": 1.1902,
      "step": 30
    },
    {
      "epoch": 8.86,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.203112006187439,
      "eval_runtime": 0.776,
      "eval_samples_per_second": 59.278,
      "eval_steps_per_second": 3.866,
      "step": 31
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.1936233043670654,
      "eval_runtime": 0.746,
      "eval_samples_per_second": 61.659,
      "eval_steps_per_second": 4.021,
      "step": 35
    },
    {
      "epoch": 10.86,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.1829485893249512,
      "eval_runtime": 0.794,
      "eval_samples_per_second": 57.938,
      "eval_steps_per_second": 3.779,
      "step": 38
    },
    {
      "epoch": 11.43,
      "learning_rate": 2.4561403508771925e-05,
      "loss": 1.1472,
      "step": 40
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.156883955001831,
      "eval_runtime": 0.744,
      "eval_samples_per_second": 61.827,
      "eval_steps_per_second": 4.032,
      "step": 42
    },
    {
      "epoch": 12.86,
      "eval_accuracy": 0.45652173913043476,
      "eval_loss": 1.1431748867034912,
      "eval_runtime": 0.7287,
      "eval_samples_per_second": 63.129,
      "eval_steps_per_second": 4.117,
      "step": 45
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.4782608695652174,
      "eval_loss": 1.1357367038726807,
      "eval_runtime": 0.7642,
      "eval_samples_per_second": 60.196,
      "eval_steps_per_second": 3.926,
      "step": 49
    },
    {
      "epoch": 14.29,
      "learning_rate": 2.1491228070175438e-05,
      "loss": 1.1495,
      "step": 50
    },
    {
      "epoch": 14.86,
      "eval_accuracy": 0.5,
      "eval_loss": 1.1177672147750854,
      "eval_runtime": 0.7578,
      "eval_samples_per_second": 60.705,
      "eval_steps_per_second": 3.959,
      "step": 52
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.5217391304347826,
      "eval_loss": 1.0903263092041016,
      "eval_runtime": 0.77,
      "eval_samples_per_second": 59.737,
      "eval_steps_per_second": 3.896,
      "step": 56
    },
    {
      "epoch": 16.86,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 1.0714055299758911,
      "eval_runtime": 0.7702,
      "eval_samples_per_second": 59.726,
      "eval_steps_per_second": 3.895,
      "step": 59
    },
    {
      "epoch": 17.14,
      "learning_rate": 1.8421052631578944e-05,
      "loss": 1.0824,
      "step": 60
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.0453197956085205,
      "eval_runtime": 0.7803,
      "eval_samples_per_second": 58.955,
      "eval_steps_per_second": 3.845,
      "step": 63
    },
    {
      "epoch": 18.86,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 1.0149754285812378,
      "eval_runtime": 0.7572,
      "eval_samples_per_second": 60.752,
      "eval_steps_per_second": 3.962,
      "step": 66
    },
    {
      "epoch": 20.0,
      "learning_rate": 1.5350877192982453e-05,
      "loss": 1.0535,
      "step": 70
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.9924822449684143,
      "eval_runtime": 0.7866,
      "eval_samples_per_second": 58.483,
      "eval_steps_per_second": 3.814,
      "step": 70
    },
    {
      "epoch": 20.86,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 0.9778493642807007,
      "eval_runtime": 0.8042,
      "eval_samples_per_second": 57.201,
      "eval_steps_per_second": 3.731,
      "step": 73
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.9570326805114746,
      "eval_runtime": 0.7982,
      "eval_samples_per_second": 57.628,
      "eval_steps_per_second": 3.758,
      "step": 77
    },
    {
      "epoch": 22.86,
      "learning_rate": 1.2280701754385963e-05,
      "loss": 0.994,
      "step": 80
    },
    {
      "epoch": 22.86,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 0.9441110491752625,
      "eval_runtime": 0.7429,
      "eval_samples_per_second": 61.919,
      "eval_steps_per_second": 4.038,
      "step": 80
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 0.9246149659156799,
      "eval_runtime": 0.7415,
      "eval_samples_per_second": 62.033,
      "eval_steps_per_second": 4.046,
      "step": 84
    },
    {
      "epoch": 24.86,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 0.9094782471656799,
      "eval_runtime": 0.755,
      "eval_samples_per_second": 60.928,
      "eval_steps_per_second": 3.974,
      "step": 87
    },
    {
      "epoch": 25.71,
      "learning_rate": 9.210526315789472e-06,
      "loss": 0.9554,
      "step": 90
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 0.8936623334884644,
      "eval_runtime": 0.7873,
      "eval_samples_per_second": 58.43,
      "eval_steps_per_second": 3.811,
      "step": 91
    },
    {
      "epoch": 26.86,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.8924787044525146,
      "eval_runtime": 0.7504,
      "eval_samples_per_second": 61.301,
      "eval_steps_per_second": 3.998,
      "step": 94
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.8886207938194275,
      "eval_runtime": 0.748,
      "eval_samples_per_second": 61.494,
      "eval_steps_per_second": 4.01,
      "step": 98
    },
    {
      "epoch": 28.57,
      "learning_rate": 6.140350877192981e-06,
      "loss": 0.953,
      "step": 100
    },
    {
      "epoch": 28.86,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.8804457187652588,
      "eval_runtime": 0.7633,
      "eval_samples_per_second": 60.262,
      "eval_steps_per_second": 3.93,
      "step": 101
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.8744495511054993,
      "eval_runtime": 0.76,
      "eval_samples_per_second": 60.524,
      "eval_steps_per_second": 3.947,
      "step": 105
    },
    {
      "epoch": 30.86,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.875434398651123,
      "eval_runtime": 0.8019,
      "eval_samples_per_second": 57.364,
      "eval_steps_per_second": 3.741,
      "step": 108
    },
    {
      "epoch": 31.43,
      "learning_rate": 3.0701754385964907e-06,
      "loss": 0.9092,
      "step": 110
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.8732549548149109,
      "eval_runtime": 0.8029,
      "eval_samples_per_second": 57.294,
      "eval_steps_per_second": 3.737,
      "step": 112
    },
    {
      "epoch": 32.86,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.8725218772888184,
      "eval_runtime": 0.8099,
      "eval_samples_per_second": 56.796,
      "eval_steps_per_second": 3.704,
      "step": 115
    },
    {
      "epoch": 34.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.8721739649772644,
      "eval_runtime": 0.7302,
      "eval_samples_per_second": 62.996,
      "eval_steps_per_second": 4.108,
      "step": 119
    },
    {
      "epoch": 34.29,
      "learning_rate": 0.0,
      "loss": 0.9247,
      "step": 120
    },
    {
      "epoch": 34.29,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.8720169067382812,
      "eval_runtime": 0.7717,
      "eval_samples_per_second": 59.608,
      "eval_steps_per_second": 3.887,
      "step": 120
    },
    {
      "epoch": 34.29,
      "step": 120,
      "total_flos": 2.3770905934823424e+17,
      "train_loss": 1.0817648808161417,
      "train_runtime": 199.6882,
      "train_samples_per_second": 42.667,
      "train_steps_per_second": 0.601
    }
  ],
  "logging_steps": 10,
  "max_steps": 120,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "total_flos": 2.3770905934823424e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}