{
"best_metric": 0.93125,
"best_model_checkpoint": "mvit_v2_rwf-2000/checkpoint-570",
"epoch": 5.125,
"eval_steps": 500,
"global_step": 1140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006578947368421052,
"grad_norm": 36.64044189453125,
"learning_rate": 1.986842105263158e-05,
"loss": 6.5592,
"step": 10
},
{
"epoch": 0.013157894736842105,
"grad_norm": 43.76960372924805,
"learning_rate": 1.9736842105263158e-05,
"loss": 5.0333,
"step": 20
},
{
"epoch": 0.019736842105263157,
"grad_norm": 29.054157257080078,
"learning_rate": 1.960526315789474e-05,
"loss": 3.3536,
"step": 30
},
{
"epoch": 0.02631578947368421,
"grad_norm": 26.586713790893555,
"learning_rate": 1.9473684210526318e-05,
"loss": 2.0178,
"step": 40
},
{
"epoch": 0.03289473684210526,
"grad_norm": 13.628296852111816,
"learning_rate": 1.9342105263157896e-05,
"loss": 1.4669,
"step": 50
},
{
"epoch": 0.039473684210526314,
"grad_norm": 12.267337799072266,
"learning_rate": 1.9210526315789474e-05,
"loss": 0.9024,
"step": 60
},
{
"epoch": 0.046052631578947366,
"grad_norm": 13.692243576049805,
"learning_rate": 1.9078947368421056e-05,
"loss": 0.676,
"step": 70
},
{
"epoch": 0.05263157894736842,
"grad_norm": 15.4345064163208,
"learning_rate": 1.894736842105263e-05,
"loss": 0.8188,
"step": 80
},
{
"epoch": 0.05921052631578947,
"grad_norm": 18.368070602416992,
"learning_rate": 1.8815789473684213e-05,
"loss": 0.6296,
"step": 90
},
{
"epoch": 0.06578947368421052,
"grad_norm": 12.882184982299805,
"learning_rate": 1.868421052631579e-05,
"loss": 0.6014,
"step": 100
},
{
"epoch": 0.07236842105263158,
"grad_norm": 13.11134147644043,
"learning_rate": 1.8552631578947373e-05,
"loss": 0.6352,
"step": 110
},
{
"epoch": 0.07894736842105263,
"grad_norm": 8.479208946228027,
"learning_rate": 1.8421052631578947e-05,
"loss": 0.6688,
"step": 120
},
{
"epoch": 0.08552631578947369,
"grad_norm": 9.394298553466797,
"learning_rate": 1.828947368421053e-05,
"loss": 0.5578,
"step": 130
},
{
"epoch": 0.09210526315789473,
"grad_norm": 13.005159378051758,
"learning_rate": 1.8157894736842107e-05,
"loss": 0.5352,
"step": 140
},
{
"epoch": 0.09868421052631579,
"grad_norm": 21.03794288635254,
"learning_rate": 1.8026315789473685e-05,
"loss": 0.3291,
"step": 150
},
{
"epoch": 0.10526315789473684,
"grad_norm": 20.376361846923828,
"learning_rate": 1.7894736842105264e-05,
"loss": 0.3126,
"step": 160
},
{
"epoch": 0.1118421052631579,
"grad_norm": 9.78145694732666,
"learning_rate": 1.7763157894736845e-05,
"loss": 0.4688,
"step": 170
},
{
"epoch": 0.11842105263157894,
"grad_norm": 10.771368026733398,
"learning_rate": 1.763157894736842e-05,
"loss": 0.3389,
"step": 180
},
{
"epoch": 0.125,
"grad_norm": 1.7945446968078613,
"learning_rate": 1.7500000000000002e-05,
"loss": 0.4242,
"step": 190
},
{
"epoch": 0.125,
"eval_accuracy": 0.8375,
"eval_f1": 0.8365807668133248,
"eval_loss": 0.42270001769065857,
"eval_precision": 0.8452685421994885,
"eval_runtime": 69.4902,
"eval_samples_per_second": 2.302,
"eval_steps_per_second": 0.288,
"step": 190
},
{
"epoch": 1.006578947368421,
"grad_norm": 26.072763442993164,
"learning_rate": 1.736842105263158e-05,
"loss": 0.2755,
"step": 200
},
{
"epoch": 1.013157894736842,
"grad_norm": 16.94261932373047,
"learning_rate": 1.723684210526316e-05,
"loss": 0.4916,
"step": 210
},
{
"epoch": 1.019736842105263,
"grad_norm": 11.323436737060547,
"learning_rate": 1.7105263157894737e-05,
"loss": 0.2864,
"step": 220
},
{
"epoch": 1.0263157894736843,
"grad_norm": 5.436691761016846,
"learning_rate": 1.6973684210526318e-05,
"loss": 0.3343,
"step": 230
},
{
"epoch": 1.0328947368421053,
"grad_norm": 19.71637725830078,
"learning_rate": 1.6842105263157896e-05,
"loss": 0.3933,
"step": 240
},
{
"epoch": 1.0394736842105263,
"grad_norm": 7.336604595184326,
"learning_rate": 1.6710526315789475e-05,
"loss": 0.3754,
"step": 250
},
{
"epoch": 1.0460526315789473,
"grad_norm": 23.727394104003906,
"learning_rate": 1.6578947368421053e-05,
"loss": 0.2446,
"step": 260
},
{
"epoch": 1.0526315789473684,
"grad_norm": 21.048952102661133,
"learning_rate": 1.644736842105263e-05,
"loss": 0.3691,
"step": 270
},
{
"epoch": 1.0592105263157894,
"grad_norm": 1.7281363010406494,
"learning_rate": 1.6315789473684213e-05,
"loss": 0.363,
"step": 280
},
{
"epoch": 1.0657894736842106,
"grad_norm": 17.512372970581055,
"learning_rate": 1.618421052631579e-05,
"loss": 0.365,
"step": 290
},
{
"epoch": 1.0723684210526316,
"grad_norm": 4.898046016693115,
"learning_rate": 1.605263157894737e-05,
"loss": 0.1919,
"step": 300
},
{
"epoch": 1.0789473684210527,
"grad_norm": 23.86078643798828,
"learning_rate": 1.5921052631578948e-05,
"loss": 0.3222,
"step": 310
},
{
"epoch": 1.0855263157894737,
"grad_norm": 14.71253490447998,
"learning_rate": 1.578947368421053e-05,
"loss": 0.3996,
"step": 320
},
{
"epoch": 1.0921052631578947,
"grad_norm": 15.617661476135254,
"learning_rate": 1.5657894736842107e-05,
"loss": 0.2916,
"step": 330
},
{
"epoch": 1.0986842105263157,
"grad_norm": 16.566120147705078,
"learning_rate": 1.5526315789473686e-05,
"loss": 0.2138,
"step": 340
},
{
"epoch": 1.1052631578947367,
"grad_norm": 19.48283576965332,
"learning_rate": 1.5394736842105264e-05,
"loss": 0.3296,
"step": 350
},
{
"epoch": 1.111842105263158,
"grad_norm": 25.25094223022461,
"learning_rate": 1.5263157894736846e-05,
"loss": 0.5412,
"step": 360
},
{
"epoch": 1.118421052631579,
"grad_norm": 2.2683284282684326,
"learning_rate": 1.5131578947368422e-05,
"loss": 0.2906,
"step": 370
},
{
"epoch": 1.125,
"grad_norm": 31.231639862060547,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.3906,
"step": 380
},
{
"epoch": 1.125,
"eval_accuracy": 0.9,
"eval_f1": 0.8999374609130708,
"eval_loss": 0.2932564616203308,
"eval_precision": 0.9010025062656641,
"eval_runtime": 73.6902,
"eval_samples_per_second": 2.171,
"eval_steps_per_second": 0.271,
"step": 380
},
{
"epoch": 2.0065789473684212,
"grad_norm": 0.302801251411438,
"learning_rate": 1.486842105263158e-05,
"loss": 0.148,
"step": 390
},
{
"epoch": 2.013157894736842,
"grad_norm": 2.899660587310791,
"learning_rate": 1.4736842105263159e-05,
"loss": 0.3116,
"step": 400
},
{
"epoch": 2.0197368421052633,
"grad_norm": 0.4640880227088928,
"learning_rate": 1.4605263157894739e-05,
"loss": 0.131,
"step": 410
},
{
"epoch": 2.026315789473684,
"grad_norm": 28.330896377563477,
"learning_rate": 1.4473684210526317e-05,
"loss": 0.1221,
"step": 420
},
{
"epoch": 2.0328947368421053,
"grad_norm": 0.6787762641906738,
"learning_rate": 1.4342105263157895e-05,
"loss": 0.1547,
"step": 430
},
{
"epoch": 2.039473684210526,
"grad_norm": 27.384422302246094,
"learning_rate": 1.4210526315789475e-05,
"loss": 0.2902,
"step": 440
},
{
"epoch": 2.0460526315789473,
"grad_norm": 18.884803771972656,
"learning_rate": 1.4078947368421055e-05,
"loss": 0.2381,
"step": 450
},
{
"epoch": 2.0526315789473686,
"grad_norm": 2.0699939727783203,
"learning_rate": 1.3947368421052631e-05,
"loss": 0.1437,
"step": 460
},
{
"epoch": 2.0592105263157894,
"grad_norm": 0.6573389768600464,
"learning_rate": 1.3815789473684211e-05,
"loss": 0.54,
"step": 470
},
{
"epoch": 2.0657894736842106,
"grad_norm": 24.888164520263672,
"learning_rate": 1.3684210526315791e-05,
"loss": 0.2222,
"step": 480
},
{
"epoch": 2.0723684210526314,
"grad_norm": 5.3757643699646,
"learning_rate": 1.3552631578947371e-05,
"loss": 0.3766,
"step": 490
},
{
"epoch": 2.0789473684210527,
"grad_norm": 8.543233871459961,
"learning_rate": 1.3421052631578948e-05,
"loss": 0.2016,
"step": 500
},
{
"epoch": 2.085526315789474,
"grad_norm": 24.547653198242188,
"learning_rate": 1.3289473684210528e-05,
"loss": 0.1867,
"step": 510
},
{
"epoch": 2.0921052631578947,
"grad_norm": 9.149017333984375,
"learning_rate": 1.3157894736842108e-05,
"loss": 0.1007,
"step": 520
},
{
"epoch": 2.098684210526316,
"grad_norm": 0.6297352313995361,
"learning_rate": 1.3026315789473684e-05,
"loss": 0.0951,
"step": 530
},
{
"epoch": 2.1052631578947367,
"grad_norm": 17.117029190063477,
"learning_rate": 1.2894736842105264e-05,
"loss": 0.405,
"step": 540
},
{
"epoch": 2.111842105263158,
"grad_norm": 1.1509796380996704,
"learning_rate": 1.2763157894736844e-05,
"loss": 0.3637,
"step": 550
},
{
"epoch": 2.1184210526315788,
"grad_norm": 13.600915908813477,
"learning_rate": 1.263157894736842e-05,
"loss": 0.2626,
"step": 560
},
{
"epoch": 2.125,
"grad_norm": 3.0041847229003906,
"learning_rate": 1.25e-05,
"loss": 0.3199,
"step": 570
},
{
"epoch": 2.125,
"eval_accuracy": 0.93125,
"eval_f1": 0.931247314348217,
"eval_loss": 0.30343881249427795,
"eval_precision": 0.9313173933427098,
"eval_runtime": 65.8695,
"eval_samples_per_second": 2.429,
"eval_steps_per_second": 0.304,
"step": 570
},
{
"epoch": 3.0065789473684212,
"grad_norm": 3.123574733734131,
"learning_rate": 1.236842105263158e-05,
"loss": 0.1695,
"step": 580
},
{
"epoch": 3.013157894736842,
"grad_norm": 30.406293869018555,
"learning_rate": 1.2236842105263159e-05,
"loss": 0.2813,
"step": 590
},
{
"epoch": 3.0197368421052633,
"grad_norm": 14.729782104492188,
"learning_rate": 1.2105263157894737e-05,
"loss": 0.2084,
"step": 600
},
{
"epoch": 3.026315789473684,
"grad_norm": 6.453456401824951,
"learning_rate": 1.1973684210526317e-05,
"loss": 0.3726,
"step": 610
},
{
"epoch": 3.0328947368421053,
"grad_norm": 0.5487316846847534,
"learning_rate": 1.1842105263157895e-05,
"loss": 0.3515,
"step": 620
},
{
"epoch": 3.039473684210526,
"grad_norm": 2.270763635635376,
"learning_rate": 1.1710526315789475e-05,
"loss": 0.0221,
"step": 630
},
{
"epoch": 3.0460526315789473,
"grad_norm": 13.998810768127441,
"learning_rate": 1.1578947368421053e-05,
"loss": 0.33,
"step": 640
},
{
"epoch": 3.0526315789473686,
"grad_norm": 8.822827339172363,
"learning_rate": 1.1447368421052632e-05,
"loss": 0.161,
"step": 650
},
{
"epoch": 3.0592105263157894,
"grad_norm": 29.708253860473633,
"learning_rate": 1.1315789473684212e-05,
"loss": 0.2569,
"step": 660
},
{
"epoch": 3.0657894736842106,
"grad_norm": 23.062585830688477,
"learning_rate": 1.1184210526315792e-05,
"loss": 0.2207,
"step": 670
},
{
"epoch": 3.0723684210526314,
"grad_norm": 10.31871509552002,
"learning_rate": 1.105263157894737e-05,
"loss": 0.3339,
"step": 680
},
{
"epoch": 3.0789473684210527,
"grad_norm": 2.75856351852417,
"learning_rate": 1.0921052631578948e-05,
"loss": 0.0798,
"step": 690
},
{
"epoch": 3.085526315789474,
"grad_norm": 1.439271330833435,
"learning_rate": 1.0789473684210528e-05,
"loss": 0.107,
"step": 700
},
{
"epoch": 3.0921052631578947,
"grad_norm": 0.4239494502544403,
"learning_rate": 1.0657894736842108e-05,
"loss": 0.1158,
"step": 710
},
{
"epoch": 3.098684210526316,
"grad_norm": 16.95366859436035,
"learning_rate": 1.0526315789473684e-05,
"loss": 0.2155,
"step": 720
},
{
"epoch": 3.1052631578947367,
"grad_norm": 20.484813690185547,
"learning_rate": 1.0394736842105264e-05,
"loss": 0.2505,
"step": 730
},
{
"epoch": 3.111842105263158,
"grad_norm": 31.08599853515625,
"learning_rate": 1.0263157894736844e-05,
"loss": 0.4975,
"step": 740
},
{
"epoch": 3.1184210526315788,
"grad_norm": 22.264848709106445,
"learning_rate": 1.0131578947368421e-05,
"loss": 0.2602,
"step": 750
},
{
"epoch": 3.125,
"grad_norm": 20.796369552612305,
"learning_rate": 1e-05,
"loss": 0.2239,
"step": 760
},
{
"epoch": 3.125,
"eval_accuracy": 0.9125,
"eval_f1": 0.912445278298937,
"eval_loss": 0.3610968291759491,
"eval_precision": 0.9135338345864662,
"eval_runtime": 68.362,
"eval_samples_per_second": 2.34,
"eval_steps_per_second": 0.293,
"step": 760
},
{
"epoch": 4.006578947368421,
"grad_norm": 0.06610066443681717,
"learning_rate": 9.868421052631579e-06,
"loss": 0.2882,
"step": 770
},
{
"epoch": 4.0131578947368425,
"grad_norm": 3.3229968547821045,
"learning_rate": 9.736842105263159e-06,
"loss": 0.148,
"step": 780
},
{
"epoch": 4.019736842105263,
"grad_norm": 18.369155883789062,
"learning_rate": 9.605263157894737e-06,
"loss": 0.2034,
"step": 790
},
{
"epoch": 4.026315789473684,
"grad_norm": 0.4686603844165802,
"learning_rate": 9.473684210526315e-06,
"loss": 0.1949,
"step": 800
},
{
"epoch": 4.032894736842105,
"grad_norm": 0.3468804359436035,
"learning_rate": 9.342105263157895e-06,
"loss": 0.1468,
"step": 810
},
{
"epoch": 4.0394736842105265,
"grad_norm": 5.2816243171691895,
"learning_rate": 9.210526315789474e-06,
"loss": 0.1253,
"step": 820
},
{
"epoch": 4.046052631578948,
"grad_norm": 3.5091466903686523,
"learning_rate": 9.078947368421054e-06,
"loss": 0.0314,
"step": 830
},
{
"epoch": 4.052631578947368,
"grad_norm": 38.08211135864258,
"learning_rate": 8.947368421052632e-06,
"loss": 0.3176,
"step": 840
},
{
"epoch": 4.059210526315789,
"grad_norm": 0.1611049771308899,
"learning_rate": 8.81578947368421e-06,
"loss": 0.1931,
"step": 850
},
{
"epoch": 4.065789473684211,
"grad_norm": 0.5490935444831848,
"learning_rate": 8.68421052631579e-06,
"loss": 0.4194,
"step": 860
},
{
"epoch": 4.072368421052632,
"grad_norm": 4.411422252655029,
"learning_rate": 8.552631578947368e-06,
"loss": 0.1544,
"step": 870
},
{
"epoch": 4.078947368421052,
"grad_norm": 0.4949572682380676,
"learning_rate": 8.421052631578948e-06,
"loss": 0.3998,
"step": 880
},
{
"epoch": 4.0855263157894735,
"grad_norm": 30.750051498413086,
"learning_rate": 8.289473684210526e-06,
"loss": 0.1656,
"step": 890
},
{
"epoch": 4.092105263157895,
"grad_norm": 16.065580368041992,
"learning_rate": 8.157894736842106e-06,
"loss": 0.0652,
"step": 900
},
{
"epoch": 4.098684210526316,
"grad_norm": 21.529672622680664,
"learning_rate": 8.026315789473685e-06,
"loss": 0.1112,
"step": 910
},
{
"epoch": 4.105263157894737,
"grad_norm": 1.2219691276550293,
"learning_rate": 7.894736842105265e-06,
"loss": 0.2536,
"step": 920
},
{
"epoch": 4.1118421052631575,
"grad_norm": 1.4607069492340088,
"learning_rate": 7.763157894736843e-06,
"loss": 0.0822,
"step": 930
},
{
"epoch": 4.118421052631579,
"grad_norm": 35.09339904785156,
"learning_rate": 7.631578947368423e-06,
"loss": 0.3982,
"step": 940
},
{
"epoch": 4.125,
"grad_norm": 4.976933479309082,
"learning_rate": 7.500000000000001e-06,
"loss": 0.1747,
"step": 950
},
{
"epoch": 4.125,
"eval_accuracy": 0.93125,
"eval_f1": 0.931247314348217,
"eval_loss": 0.34753698110580444,
"eval_precision": 0.9313173933427098,
"eval_runtime": 73.1814,
"eval_samples_per_second": 2.186,
"eval_steps_per_second": 0.273,
"step": 950
},
{
"epoch": 5.006578947368421,
"grad_norm": 0.45159298181533813,
"learning_rate": 7.368421052631579e-06,
"loss": 0.1255,
"step": 960
},
{
"epoch": 5.0131578947368425,
"grad_norm": 29.889768600463867,
"learning_rate": 7.236842105263158e-06,
"loss": 0.1549,
"step": 970
},
{
"epoch": 5.019736842105263,
"grad_norm": 6.2291412353515625,
"learning_rate": 7.1052631578947375e-06,
"loss": 0.1345,
"step": 980
},
{
"epoch": 5.026315789473684,
"grad_norm": 17.853551864624023,
"learning_rate": 6.973684210526316e-06,
"loss": 0.0786,
"step": 990
},
{
"epoch": 5.032894736842105,
"grad_norm": 0.2519562244415283,
"learning_rate": 6.842105263157896e-06,
"loss": 0.1226,
"step": 1000
},
{
"epoch": 5.0394736842105265,
"grad_norm": 17.962093353271484,
"learning_rate": 6.710526315789474e-06,
"loss": 0.2208,
"step": 1010
},
{
"epoch": 5.046052631578948,
"grad_norm": 0.25263816118240356,
"learning_rate": 6.578947368421054e-06,
"loss": 0.0662,
"step": 1020
},
{
"epoch": 5.052631578947368,
"grad_norm": 0.3767828345298767,
"learning_rate": 6.447368421052632e-06,
"loss": 0.1212,
"step": 1030
},
{
"epoch": 5.059210526315789,
"grad_norm": 31.959402084350586,
"learning_rate": 6.31578947368421e-06,
"loss": 0.4038,
"step": 1040
},
{
"epoch": 5.065789473684211,
"grad_norm": 28.180463790893555,
"learning_rate": 6.18421052631579e-06,
"loss": 0.1677,
"step": 1050
},
{
"epoch": 5.072368421052632,
"grad_norm": 0.06592092663049698,
"learning_rate": 6.0526315789473685e-06,
"loss": 0.1123,
"step": 1060
},
{
"epoch": 5.078947368421052,
"grad_norm": 19.177854537963867,
"learning_rate": 5.921052631578948e-06,
"loss": 0.4745,
"step": 1070
},
{
"epoch": 5.0855263157894735,
"grad_norm": 39.3027458190918,
"learning_rate": 5.789473684210527e-06,
"loss": 0.2614,
"step": 1080
},
{
"epoch": 5.092105263157895,
"grad_norm": 12.32589054107666,
"learning_rate": 5.657894736842106e-06,
"loss": 0.0943,
"step": 1090
},
{
"epoch": 5.098684210526316,
"grad_norm": 17.65743064880371,
"learning_rate": 5.526315789473685e-06,
"loss": 0.1302,
"step": 1100
},
{
"epoch": 5.105263157894737,
"grad_norm": 0.037535008043050766,
"learning_rate": 5.394736842105264e-06,
"loss": 0.1401,
"step": 1110
},
{
"epoch": 5.1118421052631575,
"grad_norm": 1.1971192359924316,
"learning_rate": 5.263157894736842e-06,
"loss": 0.1045,
"step": 1120
},
{
"epoch": 5.118421052631579,
"grad_norm": 0.809428334236145,
"learning_rate": 5.131578947368422e-06,
"loss": 0.2402,
"step": 1130
},
{
"epoch": 5.125,
"grad_norm": 0.5052193999290466,
"learning_rate": 5e-06,
"loss": 0.1702,
"step": 1140
},
{
"epoch": 5.125,
"eval_accuracy": 0.93125,
"eval_f1": 0.931247314348217,
"eval_loss": 0.36672312021255493,
"eval_precision": 0.9313173933427098,
"eval_runtime": 73.77,
"eval_samples_per_second": 2.169,
"eval_steps_per_second": 0.271,
"step": 1140
},
{
"epoch": 5.125,
"step": 1140,
"total_flos": 0.0,
"train_loss": 0.43041260364024264,
"train_runtime": 5887.2517,
"train_samples_per_second": 2.065,
"train_steps_per_second": 0.258
},
{
"epoch": 5.125,
"eval_accuracy": 0.8876404494382022,
"eval_f1": 0.887419371456348,
"eval_loss": 0.37810325622558594,
"eval_precision": 0.8908100139372724,
"eval_runtime": 376.8406,
"eval_samples_per_second": 2.126,
"eval_steps_per_second": 0.268,
"step": 1140
},
{
"epoch": 5.125,
"eval_accuracy": 0.8875,
"eval_f1": 0.8872717252436183,
"eval_loss": 0.37857261300086975,
"eval_precision": 0.8906643814900695,
"eval_runtime": 371.215,
"eval_samples_per_second": 2.155,
"eval_steps_per_second": 0.269,
"step": 1140
}
],
"logging_steps": 10,
"max_steps": 1520,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}