{
"best_metric": 0.633788037775446,
"best_model_checkpoint": "/media/cse/HDD/Shawon/shawon/Timesformer_tuning_BdSLW60_divided/checkpoint-1754",
"epoch": 2.1990867579908677,
"eval_steps": 500,
"global_step": 2190,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0045662100456621,
"grad_norm": 18.209203720092773,
"learning_rate": 2.2831050228310503e-06,
"loss": 4.331,
"step": 10
},
{
"epoch": 0.0091324200913242,
"grad_norm": 12.897218704223633,
"learning_rate": 4.566210045662101e-06,
"loss": 4.1036,
"step": 20
},
{
"epoch": 0.0136986301369863,
"grad_norm": 12.286799430847168,
"learning_rate": 6.849315068493151e-06,
"loss": 4.2229,
"step": 30
},
{
"epoch": 0.0182648401826484,
"grad_norm": 12.371978759765625,
"learning_rate": 9.132420091324201e-06,
"loss": 4.1291,
"step": 40
},
{
"epoch": 0.0228310502283105,
"grad_norm": 12.65040397644043,
"learning_rate": 1.1415525114155251e-05,
"loss": 4.1588,
"step": 50
},
{
"epoch": 0.0273972602739726,
"grad_norm": 12.903022766113281,
"learning_rate": 1.3698630136986302e-05,
"loss": 4.0512,
"step": 60
},
{
"epoch": 0.0319634703196347,
"grad_norm": 12.025260925292969,
"learning_rate": 1.5981735159817352e-05,
"loss": 3.8854,
"step": 70
},
{
"epoch": 0.0365296803652968,
"grad_norm": 12.173115730285645,
"learning_rate": 1.8264840182648402e-05,
"loss": 3.9969,
"step": 80
},
{
"epoch": 0.0410958904109589,
"grad_norm": 13.235687255859375,
"learning_rate": 2.0547945205479453e-05,
"loss": 3.8276,
"step": 90
},
{
"epoch": 0.045662100456621,
"grad_norm": 12.253507614135742,
"learning_rate": 2.2831050228310503e-05,
"loss": 3.7803,
"step": 100
},
{
"epoch": 0.0502283105022831,
"grad_norm": 12.216194152832031,
"learning_rate": 2.5114155251141553e-05,
"loss": 3.6321,
"step": 110
},
{
"epoch": 0.0547945205479452,
"grad_norm": 13.399759292602539,
"learning_rate": 2.7397260273972603e-05,
"loss": 3.4251,
"step": 120
},
{
"epoch": 0.0593607305936073,
"grad_norm": 12.781786918640137,
"learning_rate": 2.9680365296803654e-05,
"loss": 3.2993,
"step": 130
},
{
"epoch": 0.0639269406392694,
"grad_norm": 12.670400619506836,
"learning_rate": 3.1963470319634704e-05,
"loss": 3.2826,
"step": 140
},
{
"epoch": 0.0684931506849315,
"grad_norm": 13.571783065795898,
"learning_rate": 3.424657534246575e-05,
"loss": 3.1383,
"step": 150
},
{
"epoch": 0.0730593607305936,
"grad_norm": 13.101773262023926,
"learning_rate": 3.6529680365296805e-05,
"loss": 2.9979,
"step": 160
},
{
"epoch": 0.0776255707762557,
"grad_norm": 12.823298454284668,
"learning_rate": 3.881278538812785e-05,
"loss": 2.8539,
"step": 170
},
{
"epoch": 0.0821917808219178,
"grad_norm": 13.807844161987305,
"learning_rate": 4.1095890410958905e-05,
"loss": 2.9661,
"step": 180
},
{
"epoch": 0.0867579908675799,
"grad_norm": 13.598432540893555,
"learning_rate": 4.337899543378995e-05,
"loss": 2.7239,
"step": 190
},
{
"epoch": 0.091324200913242,
"grad_norm": 13.286529541015625,
"learning_rate": 4.5662100456621006e-05,
"loss": 2.4308,
"step": 200
},
{
"epoch": 0.0958904109589041,
"grad_norm": 12.907256126403809,
"learning_rate": 4.794520547945205e-05,
"loss": 2.3219,
"step": 210
},
{
"epoch": 0.1004566210045662,
"grad_norm": 12.69262409210205,
"learning_rate": 4.997463216641299e-05,
"loss": 2.0315,
"step": 220
},
{
"epoch": 0.1050228310502283,
"grad_norm": 11.776590347290039,
"learning_rate": 4.9720953830542875e-05,
"loss": 2.4161,
"step": 230
},
{
"epoch": 0.1095890410958904,
"grad_norm": 12.577189445495605,
"learning_rate": 4.9467275494672755e-05,
"loss": 1.9662,
"step": 240
},
{
"epoch": 0.1141552511415525,
"grad_norm": 11.328509330749512,
"learning_rate": 4.9213597158802636e-05,
"loss": 1.9721,
"step": 250
},
{
"epoch": 0.1187214611872146,
"grad_norm": 13.56044864654541,
"learning_rate": 4.895991882293252e-05,
"loss": 1.6344,
"step": 260
},
{
"epoch": 0.1232876712328767,
"grad_norm": 10.372136116027832,
"learning_rate": 4.8706240487062404e-05,
"loss": 1.5725,
"step": 270
},
{
"epoch": 0.1278538812785388,
"grad_norm": 9.894343376159668,
"learning_rate": 4.8452562151192285e-05,
"loss": 1.7684,
"step": 280
},
{
"epoch": 0.1324200913242009,
"grad_norm": 9.916196823120117,
"learning_rate": 4.819888381532217e-05,
"loss": 1.7534,
"step": 290
},
{
"epoch": 0.136986301369863,
"grad_norm": 10.576798439025879,
"learning_rate": 4.794520547945205e-05,
"loss": 1.5195,
"step": 300
},
{
"epoch": 0.1415525114155251,
"grad_norm": 11.507007598876953,
"learning_rate": 4.769152714358194e-05,
"loss": 1.671,
"step": 310
},
{
"epoch": 0.1461187214611872,
"grad_norm": 9.821493148803711,
"learning_rate": 4.743784880771182e-05,
"loss": 1.2848,
"step": 320
},
{
"epoch": 0.1506849315068493,
"grad_norm": 8.048973083496094,
"learning_rate": 4.71841704718417e-05,
"loss": 1.3263,
"step": 330
},
{
"epoch": 0.1552511415525114,
"grad_norm": 11.427051544189453,
"learning_rate": 4.693049213597159e-05,
"loss": 1.3725,
"step": 340
},
{
"epoch": 0.1598173515981735,
"grad_norm": 11.1187744140625,
"learning_rate": 4.667681380010147e-05,
"loss": 1.3067,
"step": 350
},
{
"epoch": 0.1643835616438356,
"grad_norm": 13.000616073608398,
"learning_rate": 4.642313546423136e-05,
"loss": 1.296,
"step": 360
},
{
"epoch": 0.1689497716894977,
"grad_norm": 8.552903175354004,
"learning_rate": 4.616945712836124e-05,
"loss": 1.1536,
"step": 370
},
{
"epoch": 0.1735159817351598,
"grad_norm": 8.447358131408691,
"learning_rate": 4.591577879249112e-05,
"loss": 1.1327,
"step": 380
},
{
"epoch": 0.1780821917808219,
"grad_norm": 11.575722694396973,
"learning_rate": 4.5662100456621006e-05,
"loss": 1.1091,
"step": 390
},
{
"epoch": 0.182648401826484,
"grad_norm": 10.289594650268555,
"learning_rate": 4.5408422120750886e-05,
"loss": 1.1887,
"step": 400
},
{
"epoch": 0.1872146118721461,
"grad_norm": 10.607954025268555,
"learning_rate": 4.5154743784880774e-05,
"loss": 0.8785,
"step": 410
},
{
"epoch": 0.1917808219178082,
"grad_norm": 8.302620887756348,
"learning_rate": 4.4901065449010655e-05,
"loss": 0.9114,
"step": 420
},
{
"epoch": 0.1963470319634703,
"grad_norm": 10.900007247924805,
"learning_rate": 4.4647387113140535e-05,
"loss": 1.014,
"step": 430
},
{
"epoch": 0.2009132420091324,
"grad_norm": 11.343582153320312,
"learning_rate": 4.439370877727042e-05,
"loss": 0.761,
"step": 440
},
{
"epoch": 0.2054794520547945,
"grad_norm": 7.5472092628479,
"learning_rate": 4.41400304414003e-05,
"loss": 1.02,
"step": 450
},
{
"epoch": 0.2100456621004566,
"grad_norm": 6.482777118682861,
"learning_rate": 4.3886352105530184e-05,
"loss": 0.7821,
"step": 460
},
{
"epoch": 0.2146118721461187,
"grad_norm": 8.665425300598145,
"learning_rate": 4.363267376966007e-05,
"loss": 0.7712,
"step": 470
},
{
"epoch": 0.2191780821917808,
"grad_norm": 6.963949203491211,
"learning_rate": 4.337899543378995e-05,
"loss": 0.741,
"step": 480
},
{
"epoch": 0.2237442922374429,
"grad_norm": 7.155235290527344,
"learning_rate": 4.312531709791984e-05,
"loss": 0.8244,
"step": 490
},
{
"epoch": 0.228310502283105,
"grad_norm": 7.2470197677612305,
"learning_rate": 4.287163876204972e-05,
"loss": 0.7631,
"step": 500
},
{
"epoch": 0.2328767123287671,
"grad_norm": 7.359896659851074,
"learning_rate": 4.26179604261796e-05,
"loss": 0.6634,
"step": 510
},
{
"epoch": 0.2374429223744292,
"grad_norm": 9.313497543334961,
"learning_rate": 4.236428209030949e-05,
"loss": 0.7052,
"step": 520
},
{
"epoch": 0.2420091324200913,
"grad_norm": 7.674174785614014,
"learning_rate": 4.211060375443937e-05,
"loss": 0.5931,
"step": 530
},
{
"epoch": 0.2465753424657534,
"grad_norm": 6.553604602813721,
"learning_rate": 4.1856925418569256e-05,
"loss": 0.6904,
"step": 540
},
{
"epoch": 0.2511415525114155,
"grad_norm": 11.845129013061523,
"learning_rate": 4.160324708269914e-05,
"loss": 0.7885,
"step": 550
},
{
"epoch": 0.2557077625570776,
"grad_norm": 9.967060089111328,
"learning_rate": 4.134956874682902e-05,
"loss": 0.9233,
"step": 560
},
{
"epoch": 0.2602739726027397,
"grad_norm": 10.10986042022705,
"learning_rate": 4.1095890410958905e-05,
"loss": 0.4369,
"step": 570
},
{
"epoch": 0.2648401826484018,
"grad_norm": 6.26952600479126,
"learning_rate": 4.0842212075088786e-05,
"loss": 0.5587,
"step": 580
},
{
"epoch": 0.2694063926940639,
"grad_norm": 10.414834022521973,
"learning_rate": 4.058853373921867e-05,
"loss": 0.6551,
"step": 590
},
{
"epoch": 0.273972602739726,
"grad_norm": 6.288766384124756,
"learning_rate": 4.0334855403348554e-05,
"loss": 0.6568,
"step": 600
},
{
"epoch": 0.2785388127853881,
"grad_norm": 5.77100944519043,
"learning_rate": 4.0081177067478435e-05,
"loss": 0.5473,
"step": 610
},
{
"epoch": 0.2831050228310502,
"grad_norm": 6.018027305603027,
"learning_rate": 3.982749873160832e-05,
"loss": 0.5149,
"step": 620
},
{
"epoch": 0.2876712328767123,
"grad_norm": 8.372048377990723,
"learning_rate": 3.95738203957382e-05,
"loss": 0.4584,
"step": 630
},
{
"epoch": 0.2922374429223744,
"grad_norm": 7.889549732208252,
"learning_rate": 3.932014205986809e-05,
"loss": 0.7308,
"step": 640
},
{
"epoch": 0.2968036529680365,
"grad_norm": 9.812642097473145,
"learning_rate": 3.906646372399797e-05,
"loss": 0.3976,
"step": 650
},
{
"epoch": 0.3013698630136986,
"grad_norm": 6.291619300842285,
"learning_rate": 3.881278538812785e-05,
"loss": 0.4715,
"step": 660
},
{
"epoch": 0.3059360730593607,
"grad_norm": 11.111459732055664,
"learning_rate": 3.855910705225774e-05,
"loss": 0.5013,
"step": 670
},
{
"epoch": 0.3105022831050228,
"grad_norm": 11.028929710388184,
"learning_rate": 3.830542871638762e-05,
"loss": 0.4116,
"step": 680
},
{
"epoch": 0.3150684931506849,
"grad_norm": 10.91637897491455,
"learning_rate": 3.80517503805175e-05,
"loss": 0.6619,
"step": 690
},
{
"epoch": 0.319634703196347,
"grad_norm": 5.09431791305542,
"learning_rate": 3.779807204464739e-05,
"loss": 0.5631,
"step": 700
},
{
"epoch": 0.3242009132420091,
"grad_norm": 7.607274532318115,
"learning_rate": 3.754439370877727e-05,
"loss": 0.7068,
"step": 710
},
{
"epoch": 0.3287671232876712,
"grad_norm": 5.097145080566406,
"learning_rate": 3.7290715372907156e-05,
"loss": 0.5534,
"step": 720
},
{
"epoch": 0.3333333333333333,
"grad_norm": 4.283755779266357,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.3874,
"step": 730
},
{
"epoch": 0.3378995433789954,
"grad_norm": 8.169587135314941,
"learning_rate": 3.678335870116692e-05,
"loss": 0.3478,
"step": 740
},
{
"epoch": 0.3424657534246575,
"grad_norm": 9.869154930114746,
"learning_rate": 3.6529680365296805e-05,
"loss": 0.5266,
"step": 750
},
{
"epoch": 0.3470319634703196,
"grad_norm": 2.9827332496643066,
"learning_rate": 3.6276002029426685e-05,
"loss": 0.4598,
"step": 760
},
{
"epoch": 0.3515981735159817,
"grad_norm": 5.095430850982666,
"learning_rate": 3.602232369355657e-05,
"loss": 0.4072,
"step": 770
},
{
"epoch": 0.3561643835616438,
"grad_norm": 5.7128119468688965,
"learning_rate": 3.5768645357686453e-05,
"loss": 0.4988,
"step": 780
},
{
"epoch": 0.3607305936073059,
"grad_norm": 7.71494722366333,
"learning_rate": 3.5514967021816334e-05,
"loss": 0.5598,
"step": 790
},
{
"epoch": 0.365296803652968,
"grad_norm": 3.576751470565796,
"learning_rate": 3.526128868594622e-05,
"loss": 0.5077,
"step": 800
},
{
"epoch": 0.3698630136986301,
"grad_norm": 3.606416702270508,
"learning_rate": 3.50076103500761e-05,
"loss": 0.3295,
"step": 810
},
{
"epoch": 0.3744292237442922,
"grad_norm": 3.141601324081421,
"learning_rate": 3.475393201420599e-05,
"loss": 0.318,
"step": 820
},
{
"epoch": 0.3789954337899543,
"grad_norm": 3.1620349884033203,
"learning_rate": 3.450025367833587e-05,
"loss": 0.4301,
"step": 830
},
{
"epoch": 0.3835616438356164,
"grad_norm": 7.8440985679626465,
"learning_rate": 3.424657534246575e-05,
"loss": 0.3342,
"step": 840
},
{
"epoch": 0.3881278538812785,
"grad_norm": 4.066171169281006,
"learning_rate": 3.399289700659564e-05,
"loss": 0.3352,
"step": 850
},
{
"epoch": 0.3926940639269406,
"grad_norm": 4.03795051574707,
"learning_rate": 3.373921867072552e-05,
"loss": 0.5177,
"step": 860
},
{
"epoch": 0.3972602739726027,
"grad_norm": 9.911918640136719,
"learning_rate": 3.34855403348554e-05,
"loss": 0.3063,
"step": 870
},
{
"epoch": 0.4004566210045662,
"eval_accuracy": 0.6012591815320042,
"eval_loss": 1.4254487752914429,
"eval_runtime": 240.276,
"eval_samples_per_second": 3.966,
"eval_steps_per_second": 1.985,
"step": 877
},
{
"epoch": 1.0013698630136987,
"grad_norm": 4.910429000854492,
"learning_rate": 3.323186199898529e-05,
"loss": 0.3396,
"step": 880
},
{
"epoch": 1.0059360730593607,
"grad_norm": 11.615983963012695,
"learning_rate": 3.297818366311517e-05,
"loss": 0.2476,
"step": 890
},
{
"epoch": 1.0105022831050228,
"grad_norm": 5.976046562194824,
"learning_rate": 3.2724505327245055e-05,
"loss": 0.267,
"step": 900
},
{
"epoch": 1.015068493150685,
"grad_norm": 11.546513557434082,
"learning_rate": 3.2470826991374936e-05,
"loss": 0.5572,
"step": 910
},
{
"epoch": 1.019634703196347,
"grad_norm": 4.033244609832764,
"learning_rate": 3.221714865550482e-05,
"loss": 0.2918,
"step": 920
},
{
"epoch": 1.0242009132420091,
"grad_norm": 1.627201795578003,
"learning_rate": 3.1963470319634704e-05,
"loss": 0.2512,
"step": 930
},
{
"epoch": 1.0287671232876712,
"grad_norm": 3.9416964054107666,
"learning_rate": 3.1709791983764585e-05,
"loss": 0.4496,
"step": 940
},
{
"epoch": 1.0333333333333334,
"grad_norm": 8.648749351501465,
"learning_rate": 3.145611364789447e-05,
"loss": 0.4884,
"step": 950
},
{
"epoch": 1.0378995433789955,
"grad_norm": 5.311582088470459,
"learning_rate": 3.120243531202435e-05,
"loss": 0.2819,
"step": 960
},
{
"epoch": 1.0424657534246575,
"grad_norm": 5.9733757972717285,
"learning_rate": 3.0948756976154234e-05,
"loss": 0.372,
"step": 970
},
{
"epoch": 1.0470319634703196,
"grad_norm": 10.992047309875488,
"learning_rate": 3.069507864028412e-05,
"loss": 0.4749,
"step": 980
},
{
"epoch": 1.0515981735159818,
"grad_norm": 2.805821180343628,
"learning_rate": 3.0441400304414e-05,
"loss": 0.2255,
"step": 990
},
{
"epoch": 1.0561643835616439,
"grad_norm": 1.54830002784729,
"learning_rate": 3.0187721968543886e-05,
"loss": 0.4999,
"step": 1000
},
{
"epoch": 1.060730593607306,
"grad_norm": 4.090977191925049,
"learning_rate": 2.993404363267377e-05,
"loss": 0.3041,
"step": 1010
},
{
"epoch": 1.065296803652968,
"grad_norm": 13.388101577758789,
"learning_rate": 2.9680365296803654e-05,
"loss": 0.3855,
"step": 1020
},
{
"epoch": 1.0698630136986302,
"grad_norm": 1.3952845335006714,
"learning_rate": 2.9426686960933534e-05,
"loss": 0.2041,
"step": 1030
},
{
"epoch": 1.0744292237442923,
"grad_norm": 1.3922096490859985,
"learning_rate": 2.917300862506342e-05,
"loss": 0.2754,
"step": 1040
},
{
"epoch": 1.0789954337899543,
"grad_norm": 5.259091854095459,
"learning_rate": 2.8919330289193303e-05,
"loss": 0.4592,
"step": 1050
},
{
"epoch": 1.0835616438356164,
"grad_norm": 8.525176048278809,
"learning_rate": 2.8665651953323187e-05,
"loss": 0.1942,
"step": 1060
},
{
"epoch": 1.0881278538812786,
"grad_norm": 5.207125186920166,
"learning_rate": 2.841197361745307e-05,
"loss": 0.3025,
"step": 1070
},
{
"epoch": 1.0926940639269407,
"grad_norm": 1.9372700452804565,
"learning_rate": 2.815829528158295e-05,
"loss": 0.3472,
"step": 1080
},
{
"epoch": 1.0972602739726027,
"grad_norm": 12.879546165466309,
"learning_rate": 2.7904616945712835e-05,
"loss": 0.3996,
"step": 1090
},
{
"epoch": 1.1018264840182648,
"grad_norm": 2.0068254470825195,
"learning_rate": 2.765093860984272e-05,
"loss": 0.3653,
"step": 1100
},
{
"epoch": 1.106392694063927,
"grad_norm": 4.621579647064209,
"learning_rate": 2.7397260273972603e-05,
"loss": 0.3694,
"step": 1110
},
{
"epoch": 1.110958904109589,
"grad_norm": 5.310023784637451,
"learning_rate": 2.7143581938102484e-05,
"loss": 0.2811,
"step": 1120
},
{
"epoch": 1.1155251141552511,
"grad_norm": 2.6011314392089844,
"learning_rate": 2.6889903602232368e-05,
"loss": 0.2318,
"step": 1130
},
{
"epoch": 1.1200913242009132,
"grad_norm": 16.11932945251465,
"learning_rate": 2.6636225266362252e-05,
"loss": 0.4196,
"step": 1140
},
{
"epoch": 1.1246575342465754,
"grad_norm": 10.792712211608887,
"learning_rate": 2.6382546930492136e-05,
"loss": 0.3332,
"step": 1150
},
{
"epoch": 1.1292237442922375,
"grad_norm": 0.7183528542518616,
"learning_rate": 2.612886859462202e-05,
"loss": 0.1916,
"step": 1160
},
{
"epoch": 1.1337899543378995,
"grad_norm": 8.360386848449707,
"learning_rate": 2.58751902587519e-05,
"loss": 0.2133,
"step": 1170
},
{
"epoch": 1.1383561643835616,
"grad_norm": 9.3360013961792,
"learning_rate": 2.5621511922881785e-05,
"loss": 0.1756,
"step": 1180
},
{
"epoch": 1.1429223744292236,
"grad_norm": 3.6500494480133057,
"learning_rate": 2.536783358701167e-05,
"loss": 0.2249,
"step": 1190
},
{
"epoch": 1.1474885844748859,
"grad_norm": 13.495393753051758,
"learning_rate": 2.5114155251141553e-05,
"loss": 0.4326,
"step": 1200
},
{
"epoch": 1.152054794520548,
"grad_norm": 1.119050145149231,
"learning_rate": 2.4860476915271437e-05,
"loss": 0.2966,
"step": 1210
},
{
"epoch": 1.15662100456621,
"grad_norm": 0.8353846073150635,
"learning_rate": 2.4606798579401318e-05,
"loss": 0.1481,
"step": 1220
},
{
"epoch": 1.1611872146118722,
"grad_norm": 4.413615703582764,
"learning_rate": 2.4353120243531202e-05,
"loss": 0.1566,
"step": 1230
},
{
"epoch": 1.1657534246575343,
"grad_norm": 3.4680347442626953,
"learning_rate": 2.4099441907661086e-05,
"loss": 0.2691,
"step": 1240
},
{
"epoch": 1.1703196347031963,
"grad_norm": 2.294340133666992,
"learning_rate": 2.384576357179097e-05,
"loss": 0.2126,
"step": 1250
},
{
"epoch": 1.1748858447488584,
"grad_norm": 0.6727573275566101,
"learning_rate": 2.359208523592085e-05,
"loss": 0.2691,
"step": 1260
},
{
"epoch": 1.1794520547945206,
"grad_norm": 4.724236488342285,
"learning_rate": 2.3338406900050735e-05,
"loss": 0.242,
"step": 1270
},
{
"epoch": 1.1840182648401827,
"grad_norm": 2.012953519821167,
"learning_rate": 2.308472856418062e-05,
"loss": 0.2324,
"step": 1280
},
{
"epoch": 1.1885844748858447,
"grad_norm": 5.335368633270264,
"learning_rate": 2.2831050228310503e-05,
"loss": 0.2389,
"step": 1290
},
{
"epoch": 1.1931506849315068,
"grad_norm": 8.303133010864258,
"learning_rate": 2.2577371892440387e-05,
"loss": 0.2827,
"step": 1300
},
{
"epoch": 1.197716894977169,
"grad_norm": 4.6699299812316895,
"learning_rate": 2.2323693556570268e-05,
"loss": 0.2348,
"step": 1310
},
{
"epoch": 1.202283105022831,
"grad_norm": 3.6652133464813232,
"learning_rate": 2.207001522070015e-05,
"loss": 0.2747,
"step": 1320
},
{
"epoch": 1.2068493150684931,
"grad_norm": 5.8564934730529785,
"learning_rate": 2.1816336884830036e-05,
"loss": 0.3638,
"step": 1330
},
{
"epoch": 1.2114155251141552,
"grad_norm": 2.1238956451416016,
"learning_rate": 2.156265854895992e-05,
"loss": 0.142,
"step": 1340
},
{
"epoch": 1.2159817351598174,
"grad_norm": 7.869147777557373,
"learning_rate": 2.13089802130898e-05,
"loss": 0.1685,
"step": 1350
},
{
"epoch": 1.2205479452054795,
"grad_norm": 4.023821830749512,
"learning_rate": 2.1055301877219685e-05,
"loss": 0.1917,
"step": 1360
},
{
"epoch": 1.2251141552511415,
"grad_norm": 12.940045356750488,
"learning_rate": 2.080162354134957e-05,
"loss": 0.2935,
"step": 1370
},
{
"epoch": 1.2296803652968036,
"grad_norm": 7.166123867034912,
"learning_rate": 2.0547945205479453e-05,
"loss": 0.2112,
"step": 1380
},
{
"epoch": 1.2342465753424658,
"grad_norm": 16.69542121887207,
"learning_rate": 2.0294266869609337e-05,
"loss": 0.1751,
"step": 1390
},
{
"epoch": 1.238812785388128,
"grad_norm": 7.8027448654174805,
"learning_rate": 2.0040588533739217e-05,
"loss": 0.2554,
"step": 1400
},
{
"epoch": 1.24337899543379,
"grad_norm": 4.178590774536133,
"learning_rate": 1.97869101978691e-05,
"loss": 0.2078,
"step": 1410
},
{
"epoch": 1.247945205479452,
"grad_norm": 7.469815731048584,
"learning_rate": 1.9533231861998985e-05,
"loss": 0.3719,
"step": 1420
},
{
"epoch": 1.252511415525114,
"grad_norm": 8.0973482131958,
"learning_rate": 1.927955352612887e-05,
"loss": 0.2117,
"step": 1430
},
{
"epoch": 1.2570776255707763,
"grad_norm": 7.601376533508301,
"learning_rate": 1.902587519025875e-05,
"loss": 0.3092,
"step": 1440
},
{
"epoch": 1.2616438356164383,
"grad_norm": 0.7640622854232788,
"learning_rate": 1.8772196854388634e-05,
"loss": 0.0979,
"step": 1450
},
{
"epoch": 1.2662100456621004,
"grad_norm": 2.080054521560669,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.2393,
"step": 1460
},
{
"epoch": 1.2707762557077626,
"grad_norm": 11.497714042663574,
"learning_rate": 1.8264840182648402e-05,
"loss": 0.2465,
"step": 1470
},
{
"epoch": 1.2753424657534247,
"grad_norm": 4.990231513977051,
"learning_rate": 1.8011161846778286e-05,
"loss": 0.1007,
"step": 1480
},
{
"epoch": 1.2799086757990867,
"grad_norm": 3.397449254989624,
"learning_rate": 1.7757483510908167e-05,
"loss": 0.2518,
"step": 1490
},
{
"epoch": 1.2844748858447488,
"grad_norm": 6.128615379333496,
"learning_rate": 1.750380517503805e-05,
"loss": 0.1396,
"step": 1500
},
{
"epoch": 1.2890410958904108,
"grad_norm": 10.955172538757324,
"learning_rate": 1.7250126839167935e-05,
"loss": 0.1437,
"step": 1510
},
{
"epoch": 1.293607305936073,
"grad_norm": 3.4163191318511963,
"learning_rate": 1.699644850329782e-05,
"loss": 0.3169,
"step": 1520
},
{
"epoch": 1.2981735159817351,
"grad_norm": 0.777817964553833,
"learning_rate": 1.67427701674277e-05,
"loss": 0.193,
"step": 1530
},
{
"epoch": 1.3027397260273972,
"grad_norm": 6.872499465942383,
"learning_rate": 1.6489091831557584e-05,
"loss": 0.2019,
"step": 1540
},
{
"epoch": 1.3073059360730594,
"grad_norm": 1.3268109560012817,
"learning_rate": 1.6235413495687468e-05,
"loss": 0.1058,
"step": 1550
},
{
"epoch": 1.3118721461187215,
"grad_norm": 0.33468785881996155,
"learning_rate": 1.5981735159817352e-05,
"loss": 0.2684,
"step": 1560
},
{
"epoch": 1.3164383561643835,
"grad_norm": 1.7843540906906128,
"learning_rate": 1.5728056823947236e-05,
"loss": 0.3024,
"step": 1570
},
{
"epoch": 1.3210045662100456,
"grad_norm": 1.9103947877883911,
"learning_rate": 1.5474378488077117e-05,
"loss": 0.3474,
"step": 1580
},
{
"epoch": 1.3255707762557076,
"grad_norm": 2.3000149726867676,
"learning_rate": 1.5220700152207e-05,
"loss": 0.1792,
"step": 1590
},
{
"epoch": 1.33013698630137,
"grad_norm": 0.66684490442276,
"learning_rate": 1.4967021816336885e-05,
"loss": 0.239,
"step": 1600
},
{
"epoch": 1.334703196347032,
"grad_norm": 0.592566967010498,
"learning_rate": 1.4713343480466767e-05,
"loss": 0.3183,
"step": 1610
},
{
"epoch": 1.339269406392694,
"grad_norm": 6.709100246429443,
"learning_rate": 1.4459665144596651e-05,
"loss": 0.2196,
"step": 1620
},
{
"epoch": 1.3438356164383563,
"grad_norm": 0.2524738907814026,
"learning_rate": 1.4205986808726535e-05,
"loss": 0.1112,
"step": 1630
},
{
"epoch": 1.3484018264840183,
"grad_norm": 0.8381848931312561,
"learning_rate": 1.3952308472856418e-05,
"loss": 0.0951,
"step": 1640
},
{
"epoch": 1.3529680365296803,
"grad_norm": 6.706734657287598,
"learning_rate": 1.3698630136986302e-05,
"loss": 0.2305,
"step": 1650
},
{
"epoch": 1.3575342465753424,
"grad_norm": 0.4047660231590271,
"learning_rate": 1.3444951801116184e-05,
"loss": 0.2735,
"step": 1660
},
{
"epoch": 1.3621004566210044,
"grad_norm": 10.571824073791504,
"learning_rate": 1.3191273465246068e-05,
"loss": 0.2175,
"step": 1670
},
{
"epoch": 1.3666666666666667,
"grad_norm": 0.39465513825416565,
"learning_rate": 1.293759512937595e-05,
"loss": 0.1665,
"step": 1680
},
{
"epoch": 1.3712328767123287,
"grad_norm": 5.5752739906311035,
"learning_rate": 1.2683916793505835e-05,
"loss": 0.2829,
"step": 1690
},
{
"epoch": 1.3757990867579908,
"grad_norm": 0.23495937883853912,
"learning_rate": 1.2430238457635719e-05,
"loss": 0.1155,
"step": 1700
},
{
"epoch": 1.380365296803653,
"grad_norm": 4.892249584197998,
"learning_rate": 1.2176560121765601e-05,
"loss": 0.1875,
"step": 1710
},
{
"epoch": 1.384931506849315,
"grad_norm": 6.203292369842529,
"learning_rate": 1.1922881785895485e-05,
"loss": 0.198,
"step": 1720
},
{
"epoch": 1.3894977168949771,
"grad_norm": 4.22658109664917,
"learning_rate": 1.1669203450025367e-05,
"loss": 0.2684,
"step": 1730
},
{
"epoch": 1.3940639269406392,
"grad_norm": 1.199587106704712,
"learning_rate": 1.1415525114155251e-05,
"loss": 0.1568,
"step": 1740
},
{
"epoch": 1.3986301369863012,
"grad_norm": 2.2809574604034424,
"learning_rate": 1.1161846778285134e-05,
"loss": 0.1077,
"step": 1750
},
{
"epoch": 1.4004566210045661,
"eval_accuracy": 0.633788037775446,
"eval_loss": 1.3121135234832764,
"eval_runtime": 244.499,
"eval_samples_per_second": 3.898,
"eval_steps_per_second": 1.951,
"step": 1754
},
{
"epoch": 2.0027397260273974,
"grad_norm": 1.9538928270339966,
"learning_rate": 1.0908168442415018e-05,
"loss": 0.14,
"step": 1760
},
{
"epoch": 2.007305936073059,
"grad_norm": 0.5063276290893555,
"learning_rate": 1.06544901065449e-05,
"loss": 0.0791,
"step": 1770
},
{
"epoch": 2.0118721461187214,
"grad_norm": 10.108781814575195,
"learning_rate": 1.0400811770674784e-05,
"loss": 0.1614,
"step": 1780
},
{
"epoch": 2.0164383561643837,
"grad_norm": 2.624630928039551,
"learning_rate": 1.0147133434804668e-05,
"loss": 0.099,
"step": 1790
},
{
"epoch": 2.0210045662100455,
"grad_norm": 0.6380520462989807,
"learning_rate": 9.89345509893455e-06,
"loss": 0.2079,
"step": 1800
},
{
"epoch": 2.025570776255708,
"grad_norm": 1.0283434391021729,
"learning_rate": 9.639776763064435e-06,
"loss": 0.2092,
"step": 1810
},
{
"epoch": 2.03013698630137,
"grad_norm": 9.170719146728516,
"learning_rate": 9.386098427194317e-06,
"loss": 0.2139,
"step": 1820
},
{
"epoch": 2.034703196347032,
"grad_norm": 5.085514068603516,
"learning_rate": 9.132420091324201e-06,
"loss": 0.1782,
"step": 1830
},
{
"epoch": 2.039269406392694,
"grad_norm": 7.392034530639648,
"learning_rate": 8.878741755454084e-06,
"loss": 0.1358,
"step": 1840
},
{
"epoch": 2.043835616438356,
"grad_norm": 10.423238754272461,
"learning_rate": 8.625063419583968e-06,
"loss": 0.2036,
"step": 1850
},
{
"epoch": 2.0484018264840183,
"grad_norm": 5.6207170486450195,
"learning_rate": 8.37138508371385e-06,
"loss": 0.089,
"step": 1860
},
{
"epoch": 2.0529680365296805,
"grad_norm": 0.16385725140571594,
"learning_rate": 8.117706747843734e-06,
"loss": 0.138,
"step": 1870
},
{
"epoch": 2.0575342465753423,
"grad_norm": 0.1996384710073471,
"learning_rate": 7.864028411973618e-06,
"loss": 0.2014,
"step": 1880
},
{
"epoch": 2.0621004566210046,
"grad_norm": 0.25765588879585266,
"learning_rate": 7.6103500761035e-06,
"loss": 0.1384,
"step": 1890
},
{
"epoch": 2.066666666666667,
"grad_norm": 11.252253532409668,
"learning_rate": 7.356671740233384e-06,
"loss": 0.1816,
"step": 1900
},
{
"epoch": 2.0712328767123287,
"grad_norm": 10.344156265258789,
"learning_rate": 7.102993404363268e-06,
"loss": 0.2954,
"step": 1910
},
{
"epoch": 2.075799086757991,
"grad_norm": 0.31745678186416626,
"learning_rate": 6.849315068493151e-06,
"loss": 0.0895,
"step": 1920
},
{
"epoch": 2.080365296803653,
"grad_norm": 5.952996730804443,
"learning_rate": 6.595636732623034e-06,
"loss": 0.2875,
"step": 1930
},
{
"epoch": 2.084931506849315,
"grad_norm": 2.1418561935424805,
"learning_rate": 6.341958396752917e-06,
"loss": 0.09,
"step": 1940
},
{
"epoch": 2.0894977168949773,
"grad_norm": 8.547797203063965,
"learning_rate": 6.0882800608828005e-06,
"loss": 0.1073,
"step": 1950
},
{
"epoch": 2.094063926940639,
"grad_norm": 0.28819894790649414,
"learning_rate": 5.834601725012684e-06,
"loss": 0.1083,
"step": 1960
},
{
"epoch": 2.0986301369863014,
"grad_norm": 4.290701866149902,
"learning_rate": 5.580923389142567e-06,
"loss": 0.1421,
"step": 1970
},
{
"epoch": 2.1031963470319637,
"grad_norm": 2.7559561729431152,
"learning_rate": 5.32724505327245e-06,
"loss": 0.1769,
"step": 1980
},
{
"epoch": 2.1077625570776255,
"grad_norm": 0.0814567431807518,
"learning_rate": 5.073566717402334e-06,
"loss": 0.1929,
"step": 1990
},
{
"epoch": 2.1123287671232878,
"grad_norm": 0.14855796098709106,
"learning_rate": 4.819888381532217e-06,
"loss": 0.222,
"step": 2000
},
{
"epoch": 2.1168949771689496,
"grad_norm": 5.517074108123779,
"learning_rate": 4.566210045662101e-06,
"loss": 0.0973,
"step": 2010
},
{
"epoch": 2.121461187214612,
"grad_norm": 5.795365810394287,
"learning_rate": 4.312531709791984e-06,
"loss": 0.2053,
"step": 2020
},
{
"epoch": 2.126027397260274,
"grad_norm": 3.86086106300354,
"learning_rate": 4.058853373921867e-06,
"loss": 0.177,
"step": 2030
},
{
"epoch": 2.130593607305936,
"grad_norm": 8.300323486328125,
"learning_rate": 3.80517503805175e-06,
"loss": 0.2199,
"step": 2040
},
{
"epoch": 2.135159817351598,
"grad_norm": 12.849746704101562,
"learning_rate": 3.551496702181634e-06,
"loss": 0.2226,
"step": 2050
},
{
"epoch": 2.1397260273972605,
"grad_norm": 9.738630294799805,
"learning_rate": 3.297818366311517e-06,
"loss": 0.0895,
"step": 2060
},
{
"epoch": 2.1442922374429223,
"grad_norm": 8.472663879394531,
"learning_rate": 3.0441400304414002e-06,
"loss": 0.1436,
"step": 2070
},
{
"epoch": 2.1488584474885846,
"grad_norm": 0.39250361919403076,
"learning_rate": 2.7904616945712835e-06,
"loss": 0.1693,
"step": 2080
},
{
"epoch": 2.1534246575342464,
"grad_norm": 0.13123255968093872,
"learning_rate": 2.536783358701167e-06,
"loss": 0.0442,
"step": 2090
},
{
"epoch": 2.1579908675799087,
"grad_norm": 0.15273036062717438,
"learning_rate": 2.2831050228310503e-06,
"loss": 0.0614,
"step": 2100
},
{
"epoch": 2.162557077625571,
"grad_norm": 3.393217086791992,
"learning_rate": 2.0294266869609335e-06,
"loss": 0.0838,
"step": 2110
},
{
"epoch": 2.1671232876712327,
"grad_norm": 0.510553240776062,
"learning_rate": 1.775748351090817e-06,
"loss": 0.0941,
"step": 2120
},
{
"epoch": 2.171689497716895,
"grad_norm": 8.017914772033691,
"learning_rate": 1.5220700152207001e-06,
"loss": 0.111,
"step": 2130
},
{
"epoch": 2.1762557077625573,
"grad_norm": 0.32205718755722046,
"learning_rate": 1.2683916793505835e-06,
"loss": 0.0666,
"step": 2140
},
{
"epoch": 2.180821917808219,
"grad_norm": 1.1410584449768066,
"learning_rate": 1.0147133434804667e-06,
"loss": 0.0567,
"step": 2150
},
{
"epoch": 2.1853881278538814,
"grad_norm": 0.22290684282779694,
"learning_rate": 7.610350076103501e-07,
"loss": 0.0834,
"step": 2160
},
{
"epoch": 2.189954337899543,
"grad_norm": 5.116283416748047,
"learning_rate": 5.073566717402334e-07,
"loss": 0.1786,
"step": 2170
},
{
"epoch": 2.1945205479452055,
"grad_norm": 1.6667741537094116,
"learning_rate": 2.536783358701167e-07,
"loss": 0.2055,
"step": 2180
},
{
"epoch": 2.1990867579908677,
"grad_norm": 8.045380592346191,
"learning_rate": 0.0,
"loss": 0.2208,
"step": 2190
},
{
"epoch": 2.1990867579908677,
"eval_accuracy": 0.5939139559286464,
"eval_loss": 1.4424338340759277,
"eval_runtime": 337.1936,
"eval_samples_per_second": 2.826,
"eval_steps_per_second": 1.415,
"step": 2190
},
{
"epoch": 2.1990867579908677,
"step": 2190,
"total_flos": 3.0707987980048663e+19,
"train_loss": 0.7329474766504819,
"train_runtime": 7235.1686,
"train_samples_per_second": 2.422,
"train_steps_per_second": 0.303
},
{
"epoch": 2.1990867579908677,
"eval_accuracy": 0.633788037775446,
"eval_loss": 1.3121134042739868,
"eval_runtime": 350.327,
"eval_samples_per_second": 2.72,
"eval_steps_per_second": 1.362,
"step": 2190
},
{
"epoch": 2.1990867579908677,
"eval_accuracy": 0.7222659323367427,
"eval_loss": 0.9983267188072205,
"eval_runtime": 400.1224,
"eval_samples_per_second": 3.177,
"eval_steps_per_second": 1.59,
"step": 2190
}
],
"logging_steps": 10,
"max_steps": 2190,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 2
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.0707987980048663e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}