{
  "best_metric": 0.8291605301914581,
  "best_model_checkpoint": "vit-base-patch16-224-finetuned-traffic/checkpoint-143",
  "epoch": 4.9214659685863875,
  "eval_steps": 500,
  "global_step": 235,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2094240837696335,
      "grad_norm": 2.4012229442596436,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 1.2731,
      "step": 10
    },
    {
      "epoch": 0.418848167539267,
      "grad_norm": 1.9113624095916748,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.9879,
      "step": 20
    },
    {
      "epoch": 0.6282722513089005,
      "grad_norm": 1.7040202617645264,
      "learning_rate": 4.857819905213271e-05,
      "loss": 0.762,
      "step": 30
    },
    {
      "epoch": 0.837696335078534,
      "grad_norm": 1.6665043830871582,
      "learning_rate": 4.620853080568721e-05,
      "loss": 0.6282,
      "step": 40
    },
    {
      "epoch": 0.9842931937172775,
      "eval_accuracy": 0.7643593519882179,
      "eval_f1": 0.6525442813120799,
      "eval_loss": 0.5724892020225525,
      "eval_precision": 0.7933178666588052,
      "eval_recall": 0.5918231315905315,
      "eval_runtime": 8.8153,
      "eval_samples_per_second": 77.025,
      "eval_steps_per_second": 2.496,
      "step": 47
    },
    {
      "epoch": 1.0471204188481675,
      "grad_norm": 1.8398358821868896,
      "learning_rate": 4.383886255924171e-05,
      "loss": 0.5637,
      "step": 50
    },
    {
      "epoch": 1.256544502617801,
      "grad_norm": 1.4762039184570312,
      "learning_rate": 4.146919431279621e-05,
      "loss": 0.4938,
      "step": 60
    },
    {
      "epoch": 1.4659685863874345,
      "grad_norm": 2.8683714866638184,
      "learning_rate": 3.909952606635071e-05,
      "loss": 0.477,
      "step": 70
    },
    {
      "epoch": 1.675392670157068,
      "grad_norm": 1.5742051601409912,
      "learning_rate": 3.672985781990522e-05,
      "loss": 0.4335,
      "step": 80
    },
    {
      "epoch": 1.8848167539267016,
      "grad_norm": 2.7039575576782227,
      "learning_rate": 3.4360189573459716e-05,
      "loss": 0.4486,
      "step": 90
    },
    {
      "epoch": 1.9895287958115184,
      "eval_accuracy": 0.801178203240059,
      "eval_f1": 0.7213482384744982,
      "eval_loss": 0.46302151679992676,
      "eval_precision": 0.7964021663408166,
      "eval_recall": 0.682381066062999,
      "eval_runtime": 8.91,
      "eval_samples_per_second": 76.206,
      "eval_steps_per_second": 2.469,
      "step": 95
    },
    {
      "epoch": 2.094240837696335,
      "grad_norm": 2.257824182510376,
      "learning_rate": 3.1990521327014215e-05,
      "loss": 0.3847,
      "step": 100
    },
    {
      "epoch": 2.303664921465969,
      "grad_norm": 1.9415006637573242,
      "learning_rate": 2.962085308056872e-05,
      "loss": 0.3028,
      "step": 110
    },
    {
      "epoch": 2.513089005235602,
      "grad_norm": 1.7105799913406372,
      "learning_rate": 2.7251184834123224e-05,
      "loss": 0.3271,
      "step": 120
    },
    {
      "epoch": 2.7225130890052354,
      "grad_norm": 2.8020598888397217,
      "learning_rate": 2.4881516587677726e-05,
      "loss": 0.3494,
      "step": 130
    },
    {
      "epoch": 2.931937172774869,
      "grad_norm": 1.7589322328567505,
      "learning_rate": 2.251184834123223e-05,
      "loss": 0.3285,
      "step": 140
    },
    {
      "epoch": 2.994764397905759,
      "eval_accuracy": 0.8291605301914581,
      "eval_f1": 0.7721255592297218,
      "eval_loss": 0.4393855333328247,
      "eval_precision": 0.823206019483298,
      "eval_recall": 0.7366059517474952,
      "eval_runtime": 9.0269,
      "eval_samples_per_second": 75.219,
      "eval_steps_per_second": 2.437,
      "step": 143
    },
    {
      "epoch": 3.141361256544503,
      "grad_norm": 1.3418846130371094,
      "learning_rate": 2.014218009478673e-05,
      "loss": 0.2621,
      "step": 150
    },
    {
      "epoch": 3.350785340314136,
      "grad_norm": 1.225538969039917,
      "learning_rate": 1.7772511848341233e-05,
      "loss": 0.2541,
      "step": 160
    },
    {
      "epoch": 3.5602094240837694,
      "grad_norm": 1.6284271478652954,
      "learning_rate": 1.5402843601895736e-05,
      "loss": 0.2234,
      "step": 170
    },
    {
      "epoch": 3.769633507853403,
      "grad_norm": 1.5704679489135742,
      "learning_rate": 1.3033175355450238e-05,
      "loss": 0.2444,
      "step": 180
    },
    {
      "epoch": 3.979057591623037,
      "grad_norm": 1.8943076133728027,
      "learning_rate": 1.066350710900474e-05,
      "loss": 0.2391,
      "step": 190
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8114874815905744,
      "eval_f1": 0.7554988545936505,
      "eval_loss": 0.43024498224258423,
      "eval_precision": 0.7941367088101344,
      "eval_recall": 0.7332808654662242,
      "eval_runtime": 8.9879,
      "eval_samples_per_second": 75.546,
      "eval_steps_per_second": 2.448,
      "step": 191
    },
    {
      "epoch": 4.18848167539267,
      "grad_norm": 2.159135580062866,
      "learning_rate": 8.293838862559241e-06,
      "loss": 0.187,
      "step": 200
    },
    {
      "epoch": 4.397905759162303,
      "grad_norm": 1.2371037006378174,
      "learning_rate": 5.924170616113745e-06,
      "loss": 0.1948,
      "step": 210
    },
    {
      "epoch": 4.607329842931938,
      "grad_norm": 1.2540051937103271,
      "learning_rate": 3.5545023696682464e-06,
      "loss": 0.1777,
      "step": 220
    },
    {
      "epoch": 4.816753926701571,
      "grad_norm": 1.5002622604370117,
      "learning_rate": 1.1848341232227488e-06,
      "loss": 0.1814,
      "step": 230
    },
    {
      "epoch": 4.9214659685863875,
      "eval_accuracy": 0.8217967599410898,
      "eval_f1": 0.7631453012448455,
      "eval_loss": 0.43649259209632874,
      "eval_precision": 0.7993023457173745,
      "eval_recall": 0.7362480354321321,
      "eval_runtime": 9.0379,
      "eval_samples_per_second": 75.128,
      "eval_steps_per_second": 2.434,
      "step": 235
    },
    {
      "epoch": 4.9214659685863875,
      "step": 235,
      "total_flos": 2.328211069271507e+18,
      "train_loss": 0.4179329159411978,
      "train_runtime": 724.011,
      "train_samples_per_second": 42.147,
      "train_steps_per_second": 0.325
    }
  ],
  "logging_steps": 10,
  "max_steps": 235,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 2.328211069271507e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}