{
  "best_metric": 0.9148418491484185,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-Mid-NonMidMarket-Classification/checkpoint-213",
  "epoch": 9.836065573770492,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32786885245901637,
      "grad_norm": 4.995051860809326,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 1.0564,
      "step": 10
    },
    {
      "epoch": 0.6557377049180327,
      "grad_norm": 2.201974868774414,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.5859,
      "step": 20
    },
    {
      "epoch": 0.9836065573770492,
      "grad_norm": 2.7302303314208984,
      "learning_rate": 5e-05,
      "loss": 0.4375,
      "step": 30
    },
    {
      "epoch": 0.9836065573770492,
      "eval_accuracy": 0.8564476885644768,
      "eval_loss": 0.43852365016937256,
      "eval_runtime": 47.0727,
      "eval_samples_per_second": 8.731,
      "eval_steps_per_second": 0.149,
      "step": 30
    },
    {
      "epoch": 1.3114754098360657,
      "grad_norm": 1.9822132587432861,
      "learning_rate": 4.814814814814815e-05,
      "loss": 0.3956,
      "step": 40
    },
    {
      "epoch": 1.639344262295082,
      "grad_norm": 1.4209967851638794,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.3696,
      "step": 50
    },
    {
      "epoch": 1.9672131147540983,
      "grad_norm": 2.8973567485809326,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.3408,
      "step": 60
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8978102189781022,
      "eval_loss": 0.2872111201286316,
      "eval_runtime": 46.8601,
      "eval_samples_per_second": 8.771,
      "eval_steps_per_second": 0.149,
      "step": 61
    },
    {
      "epoch": 2.2950819672131146,
      "grad_norm": 2.134539842605591,
      "learning_rate": 4.259259259259259e-05,
      "loss": 0.3242,
      "step": 70
    },
    {
      "epoch": 2.6229508196721314,
      "grad_norm": 1.9525718688964844,
      "learning_rate": 4.074074074074074e-05,
      "loss": 0.3175,
      "step": 80
    },
    {
      "epoch": 2.9508196721311473,
      "grad_norm": 2.7107627391815186,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.3106,
      "step": 90
    },
    {
      "epoch": 2.9836065573770494,
      "eval_accuracy": 0.9099756690997567,
      "eval_loss": 0.2597734332084656,
      "eval_runtime": 47.0794,
      "eval_samples_per_second": 8.73,
      "eval_steps_per_second": 0.149,
      "step": 91
    },
    {
      "epoch": 3.278688524590164,
      "grad_norm": 2.890233278274536,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.2894,
      "step": 100
    },
    {
      "epoch": 3.6065573770491803,
      "grad_norm": 2.8648366928100586,
      "learning_rate": 3.518518518518519e-05,
      "loss": 0.2802,
      "step": 110
    },
    {
      "epoch": 3.9344262295081966,
      "grad_norm": 4.32825231552124,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.3167,
      "step": 120
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9124087591240876,
      "eval_loss": 0.26094919443130493,
      "eval_runtime": 46.9524,
      "eval_samples_per_second": 8.754,
      "eval_steps_per_second": 0.149,
      "step": 122
    },
    {
      "epoch": 4.262295081967213,
      "grad_norm": 2.3360254764556885,
      "learning_rate": 3.148148148148148e-05,
      "loss": 0.2947,
      "step": 130
    },
    {
      "epoch": 4.590163934426229,
      "grad_norm": 2.176058292388916,
      "learning_rate": 2.962962962962963e-05,
      "loss": 0.2699,
      "step": 140
    },
    {
      "epoch": 4.918032786885246,
      "grad_norm": 2.304738998413086,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.2533,
      "step": 150
    },
    {
      "epoch": 4.983606557377049,
      "eval_accuracy": 0.9075425790754258,
      "eval_loss": 0.2426266223192215,
      "eval_runtime": 47.2479,
      "eval_samples_per_second": 8.699,
      "eval_steps_per_second": 0.148,
      "step": 152
    },
    {
      "epoch": 5.245901639344262,
      "grad_norm": 3.3929357528686523,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 0.2827,
      "step": 160
    },
    {
      "epoch": 5.573770491803279,
      "grad_norm": 2.577345609664917,
      "learning_rate": 2.4074074074074074e-05,
      "loss": 0.2557,
      "step": 170
    },
    {
      "epoch": 5.901639344262295,
      "grad_norm": 2.1669633388519287,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.256,
      "step": 180
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9075425790754258,
      "eval_loss": 0.2371838092803955,
      "eval_runtime": 47.0773,
      "eval_samples_per_second": 8.73,
      "eval_steps_per_second": 0.149,
      "step": 183
    },
    {
      "epoch": 6.229508196721311,
      "grad_norm": 2.977111339569092,
      "learning_rate": 2.037037037037037e-05,
      "loss": 0.2479,
      "step": 190
    },
    {
      "epoch": 6.557377049180328,
      "grad_norm": 2.62300705909729,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.2591,
      "step": 200
    },
    {
      "epoch": 6.885245901639344,
      "grad_norm": 2.1771183013916016,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.2492,
      "step": 210
    },
    {
      "epoch": 6.983606557377049,
      "eval_accuracy": 0.9148418491484185,
      "eval_loss": 0.2417779415845871,
      "eval_runtime": 47.4002,
      "eval_samples_per_second": 8.671,
      "eval_steps_per_second": 0.148,
      "step": 213
    },
    {
      "epoch": 7.213114754098361,
      "grad_norm": 2.1204617023468018,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.2281,
      "step": 220
    },
    {
      "epoch": 7.540983606557377,
      "grad_norm": 2.766522169113159,
      "learning_rate": 1.2962962962962962e-05,
      "loss": 0.238,
      "step": 230
    },
    {
      "epoch": 7.868852459016393,
      "grad_norm": 2.7315852642059326,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.2364,
      "step": 240
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9051094890510949,
      "eval_loss": 0.23522743582725525,
      "eval_runtime": 47.1934,
      "eval_samples_per_second": 8.709,
      "eval_steps_per_second": 0.148,
      "step": 244
    },
    {
      "epoch": 8.19672131147541,
      "grad_norm": 2.156822681427002,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.2334,
      "step": 250
    },
    {
      "epoch": 8.524590163934427,
      "grad_norm": 3.404446840286255,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.2211,
      "step": 260
    },
    {
      "epoch": 8.852459016393443,
      "grad_norm": 2.5259647369384766,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.2301,
      "step": 270
    },
    {
      "epoch": 8.98360655737705,
      "eval_accuracy": 0.9075425790754258,
      "eval_loss": 0.23476606607437134,
      "eval_runtime": 46.7257,
      "eval_samples_per_second": 8.796,
      "eval_steps_per_second": 0.15,
      "step": 274
    },
    {
      "epoch": 9.180327868852459,
      "grad_norm": 3.3290984630584717,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 0.2253,
      "step": 280
    },
    {
      "epoch": 9.508196721311476,
      "grad_norm": 1.9141846895217896,
      "learning_rate": 1.8518518518518519e-06,
      "loss": 0.2201,
      "step": 290
    },
    {
      "epoch": 9.836065573770492,
      "grad_norm": 2.769473075866699,
      "learning_rate": 0.0,
      "loss": 0.2255,
      "step": 300
    },
    {
      "epoch": 9.836065573770492,
      "eval_accuracy": 0.8978102189781022,
      "eval_loss": 0.2350464165210724,
      "eval_runtime": 46.6731,
      "eval_samples_per_second": 8.806,
      "eval_steps_per_second": 0.15,
      "step": 300
    },
    {
      "epoch": 9.836065573770492,
      "step": 300,
      "total_flos": 1.9063087726729052e+18,
      "train_loss": 0.3150376001993815,
      "train_runtime": 8083.4568,
      "train_samples_per_second": 9.644,
      "train_steps_per_second": 0.037
    }
  ],
  "logging_steps": 10,
  "max_steps": 300,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9063087726729052e+18,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}