{
  "best_metric": 0.962536023054755,
  "best_model_checkpoint": "./roadwork-swin-finetuned/checkpoint-704",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 704,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07102272727272728,
      "grad_norm": 17.786890029907227,
      "learning_rate": 2.9488636363636363e-05,
      "loss": 0.416,
      "step": 50
    },
    {
      "epoch": 0.14204545454545456,
      "grad_norm": 23.79692268371582,
      "learning_rate": 2.895596590909091e-05,
      "loss": 0.2916,
      "step": 100
    },
    {
      "epoch": 0.21306818181818182,
      "grad_norm": 19.02972412109375,
      "learning_rate": 2.8433948863636362e-05,
      "loss": 0.2737,
      "step": 150
    },
    {
      "epoch": 0.2840909090909091,
      "grad_norm": 0.4028087556362152,
      "learning_rate": 2.790127840909091e-05,
      "loss": 0.375,
      "step": 200
    },
    {
      "epoch": 0.35511363636363635,
      "grad_norm": 11.175421714782715,
      "learning_rate": 2.7368607954545453e-05,
      "loss": 0.3105,
      "step": 250
    },
    {
      "epoch": 0.42613636363636365,
      "grad_norm": 14.542905807495117,
      "learning_rate": 2.68359375e-05,
      "loss": 0.2578,
      "step": 300
    },
    {
      "epoch": 0.4971590909090909,
      "grad_norm": 0.4432804584503174,
      "learning_rate": 2.630326704545455e-05,
      "loss": 0.3166,
      "step": 350
    },
    {
      "epoch": 0.5681818181818182,
      "grad_norm": 0.9627830386161804,
      "learning_rate": 2.5770596590909092e-05,
      "loss": 0.3289,
      "step": 400
    },
    {
      "epoch": 0.6392045454545454,
      "grad_norm": 5.985296249389648,
      "learning_rate": 2.523792613636364e-05,
      "loss": 0.2113,
      "step": 450
    },
    {
      "epoch": 0.7102272727272727,
      "grad_norm": 6.082040786743164,
      "learning_rate": 2.470525568181818e-05,
      "loss": 0.2488,
      "step": 500
    },
    {
      "epoch": 0.78125,
      "grad_norm": 0.5786997079849243,
      "learning_rate": 2.4172585227272728e-05,
      "loss": 0.2465,
      "step": 550
    },
    {
      "epoch": 0.8522727272727273,
      "grad_norm": 18.639421463012695,
      "learning_rate": 2.3639914772727272e-05,
      "loss": 0.2751,
      "step": 600
    },
    {
      "epoch": 0.9232954545454546,
      "grad_norm": 15.852656364440918,
      "learning_rate": 2.310724431818182e-05,
      "loss": 0.2341,
      "step": 650
    },
    {
      "epoch": 0.9943181818181818,
      "grad_norm": 31.554357528686523,
      "learning_rate": 2.2574573863636364e-05,
      "loss": 0.2625,
      "step": 700
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9376996805111821,
      "eval_f1": 0.962536023054755,
      "eval_loss": 0.24441801011562347,
      "eval_precision": 0.9597701149425287,
      "eval_recall": 0.9653179190751445,
      "eval_runtime": 5.9751,
      "eval_samples_per_second": 104.769,
      "eval_steps_per_second": 13.222,
      "step": 704
    }
  ],
  "logging_steps": 50,
  "max_steps": 2816,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.4069375040768e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|