{
"best_metric": 0.951232302045097,
"best_model_checkpoint": "/home/avramit/classifier/04122025/classifier/binary/neodictabert-finetuned-binary-041225/checkpoint-1000",
"epoch": 0.9992323719706108,
"eval_steps": 500,
"global_step": 1139,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008772891764447855,
"grad_norm": 18.548049926757812,
"learning_rate": 4.0000000000000003e-07,
"loss": 0.712,
"step": 10
},
{
"epoch": 0.01754578352889571,
"grad_norm": 27.947410583496094,
"learning_rate": 8.000000000000001e-07,
"loss": 0.7286,
"step": 20
},
{
"epoch": 0.026318675293343568,
"grad_norm": 15.611265182495117,
"learning_rate": 1.2000000000000002e-06,
"loss": 0.6691,
"step": 30
},
{
"epoch": 0.03509156705779142,
"grad_norm": 14.706952095031738,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.6678,
"step": 40
},
{
"epoch": 0.04386445882223928,
"grad_norm": 20.6095027923584,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.6755,
"step": 50
},
{
"epoch": 0.052637350586687136,
"grad_norm": 15.785083770751953,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.6245,
"step": 60
},
{
"epoch": 0.06141024235113499,
"grad_norm": 16.40140724182129,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.6065,
"step": 70
},
{
"epoch": 0.07018313411558284,
"grad_norm": 16.08489990234375,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.5906,
"step": 80
},
{
"epoch": 0.0789560258800307,
"grad_norm": 10.633989334106445,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.5696,
"step": 90
},
{
"epoch": 0.08772891764447856,
"grad_norm": 13.910264015197754,
"learning_rate": 4.000000000000001e-06,
"loss": 0.5952,
"step": 100
},
{
"epoch": 0.09650180940892641,
"grad_norm": 16.332483291625977,
"learning_rate": 4.4e-06,
"loss": 0.5808,
"step": 110
},
{
"epoch": 0.10527470117337427,
"grad_norm": 12.419142723083496,
"learning_rate": 4.800000000000001e-06,
"loss": 0.564,
"step": 120
},
{
"epoch": 0.11404759293782213,
"grad_norm": 14.58597183227539,
"learning_rate": 5.2e-06,
"loss": 0.6409,
"step": 130
},
{
"epoch": 0.12282048470226999,
"grad_norm": 9.651647567749023,
"learning_rate": 5.600000000000001e-06,
"loss": 0.5049,
"step": 140
},
{
"epoch": 0.13159337646671784,
"grad_norm": 23.112552642822266,
"learning_rate": 6e-06,
"loss": 0.6125,
"step": 150
},
{
"epoch": 0.1403662682311657,
"grad_norm": 14.535902976989746,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.5401,
"step": 160
},
{
"epoch": 0.14913915999561356,
"grad_norm": 19.689529418945312,
"learning_rate": 6.800000000000001e-06,
"loss": 0.5239,
"step": 170
},
{
"epoch": 0.1579120517600614,
"grad_norm": 27.909992218017578,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.5906,
"step": 180
},
{
"epoch": 0.16668494352450927,
"grad_norm": 10.908613204956055,
"learning_rate": 7.600000000000001e-06,
"loss": 0.5276,
"step": 190
},
{
"epoch": 0.17545783528895711,
"grad_norm": 11.81062126159668,
"learning_rate": 8.000000000000001e-06,
"loss": 0.5355,
"step": 200
},
{
"epoch": 0.18423072705340499,
"grad_norm": 16.26637077331543,
"learning_rate": 8.400000000000001e-06,
"loss": 0.5265,
"step": 210
},
{
"epoch": 0.19300361881785283,
"grad_norm": 9.670770645141602,
"learning_rate": 8.8e-06,
"loss": 0.5206,
"step": 220
},
{
"epoch": 0.2017765105823007,
"grad_norm": 14.58901309967041,
"learning_rate": 9.200000000000002e-06,
"loss": 0.5791,
"step": 230
},
{
"epoch": 0.21054940234674854,
"grad_norm": 8.426600456237793,
"learning_rate": 9.600000000000001e-06,
"loss": 0.5681,
"step": 240
},
{
"epoch": 0.21932229411119641,
"grad_norm": 10.079155921936035,
"learning_rate": 1e-05,
"loss": 0.6321,
"step": 250
},
{
"epoch": 0.22809518587564426,
"grad_norm": 11.706363677978516,
"learning_rate": 1.04e-05,
"loss": 0.5798,
"step": 260
},
{
"epoch": 0.23686807764009213,
"grad_norm": 17.979509353637695,
"learning_rate": 1.0800000000000002e-05,
"loss": 0.5672,
"step": 270
},
{
"epoch": 0.24564096940453997,
"grad_norm": 10.209049224853516,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.4833,
"step": 280
},
{
"epoch": 0.25441386116898784,
"grad_norm": 15.83713150024414,
"learning_rate": 1.16e-05,
"loss": 0.5465,
"step": 290
},
{
"epoch": 0.2631867529334357,
"grad_norm": 14.48042106628418,
"learning_rate": 1.2e-05,
"loss": 0.5025,
"step": 300
},
{
"epoch": 0.27195964469788353,
"grad_norm": 14.64489459991455,
"learning_rate": 1.2400000000000002e-05,
"loss": 0.4883,
"step": 310
},
{
"epoch": 0.2807325364623314,
"grad_norm": 19.78868865966797,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.5079,
"step": 320
},
{
"epoch": 0.28950542822677927,
"grad_norm": 11.523838996887207,
"learning_rate": 1.3200000000000002e-05,
"loss": 0.5315,
"step": 330
},
{
"epoch": 0.2982783199912271,
"grad_norm": 7.584911346435547,
"learning_rate": 1.3600000000000002e-05,
"loss": 0.5383,
"step": 340
},
{
"epoch": 0.30705121175567496,
"grad_norm": 10.454729080200195,
"learning_rate": 1.4e-05,
"loss": 0.6835,
"step": 350
},
{
"epoch": 0.3158241035201228,
"grad_norm": 10.022806167602539,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.625,
"step": 360
},
{
"epoch": 0.3245969952845707,
"grad_norm": 7.622408866882324,
"learning_rate": 1.48e-05,
"loss": 0.6109,
"step": 370
},
{
"epoch": 0.33336988704901854,
"grad_norm": 6.975420951843262,
"learning_rate": 1.5200000000000002e-05,
"loss": 0.5333,
"step": 380
},
{
"epoch": 0.3421427788134664,
"grad_norm": 10.684267044067383,
"learning_rate": 1.5600000000000003e-05,
"loss": 0.4575,
"step": 390
},
{
"epoch": 0.35091567057791423,
"grad_norm": 13.571067810058594,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.5544,
"step": 400
},
{
"epoch": 0.35968856234236213,
"grad_norm": 7.205085277557373,
"learning_rate": 1.64e-05,
"loss": 0.6155,
"step": 410
},
{
"epoch": 0.36846145410680997,
"grad_norm": 13.097399711608887,
"learning_rate": 1.6800000000000002e-05,
"loss": 0.5048,
"step": 420
},
{
"epoch": 0.3772343458712578,
"grad_norm": 16.442916870117188,
"learning_rate": 1.72e-05,
"loss": 0.4976,
"step": 430
},
{
"epoch": 0.38600723763570566,
"grad_norm": 6.001971244812012,
"learning_rate": 1.76e-05,
"loss": 0.4145,
"step": 440
},
{
"epoch": 0.3947801294001535,
"grad_norm": 2.986736297607422,
"learning_rate": 1.8e-05,
"loss": 0.5126,
"step": 450
},
{
"epoch": 0.4035530211646014,
"grad_norm": 9.169132232666016,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.5626,
"step": 460
},
{
"epoch": 0.41232591292904924,
"grad_norm": 29.881427764892578,
"learning_rate": 1.88e-05,
"loss": 0.5039,
"step": 470
},
{
"epoch": 0.4210988046934971,
"grad_norm": 14.136128425598145,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.4363,
"step": 480
},
{
"epoch": 0.42987169645794493,
"grad_norm": 10.021966934204102,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.4673,
"step": 490
},
{
"epoch": 0.43864458822239283,
"grad_norm": 14.060495376586914,
"learning_rate": 2e-05,
"loss": 0.3646,
"step": 500
},
{
"epoch": 0.43864458822239283,
"eval_accuracy": 0.8244752121482805,
"eval_f1": 0.77834179357022,
"eval_loss": 0.4058316648006439,
"eval_precision": 0.8146399055489965,
"eval_recall": 0.7451403887688985,
"eval_runtime": 561.5176,
"eval_samples_per_second": 3.987,
"eval_steps_per_second": 0.499,
"step": 500
},
{
"epoch": 0.44741747998684067,
"grad_norm": 60.427162170410156,
"learning_rate": 1.968701095461659e-05,
"loss": 0.5038,
"step": 510
},
{
"epoch": 0.4561903717512885,
"grad_norm": 10.006261825561523,
"learning_rate": 1.9374021909233177e-05,
"loss": 0.4059,
"step": 520
},
{
"epoch": 0.46496326351573636,
"grad_norm": 7.567866802215576,
"learning_rate": 1.9061032863849767e-05,
"loss": 0.4314,
"step": 530
},
{
"epoch": 0.47373615528018426,
"grad_norm": 12.703323364257812,
"learning_rate": 1.8748043818466356e-05,
"loss": 0.2459,
"step": 540
},
{
"epoch": 0.4825090470446321,
"grad_norm": 3.947957754135132,
"learning_rate": 1.8435054773082942e-05,
"loss": 0.1668,
"step": 550
},
{
"epoch": 0.49128193880907994,
"grad_norm": 9.213306427001953,
"learning_rate": 1.8122065727699532e-05,
"loss": 0.2323,
"step": 560
},
{
"epoch": 0.5000548305735278,
"grad_norm": 0.25514841079711914,
"learning_rate": 1.780907668231612e-05,
"loss": 0.1923,
"step": 570
},
{
"epoch": 0.5088277223379757,
"grad_norm": 50.12841796875,
"learning_rate": 1.7496087636932707e-05,
"loss": 0.4251,
"step": 580
},
{
"epoch": 0.5176006141024235,
"grad_norm": 6.860497951507568,
"learning_rate": 1.7183098591549297e-05,
"loss": 0.3019,
"step": 590
},
{
"epoch": 0.5263735058668714,
"grad_norm": 19.863380432128906,
"learning_rate": 1.6870109546165886e-05,
"loss": 0.1773,
"step": 600
},
{
"epoch": 0.5351463976313192,
"grad_norm": 0.8949945569038391,
"learning_rate": 1.6557120500782473e-05,
"loss": 0.1894,
"step": 610
},
{
"epoch": 0.5439192893957671,
"grad_norm": 17.815959930419922,
"learning_rate": 1.6244131455399062e-05,
"loss": 0.2715,
"step": 620
},
{
"epoch": 0.5526921811602149,
"grad_norm": 12.630502700805664,
"learning_rate": 1.5931142410015648e-05,
"loss": 0.0829,
"step": 630
},
{
"epoch": 0.5614650729246627,
"grad_norm": 10.575263977050781,
"learning_rate": 1.5618153364632238e-05,
"loss": 0.1798,
"step": 640
},
{
"epoch": 0.5702379646891107,
"grad_norm": 18.799726486206055,
"learning_rate": 1.5305164319248827e-05,
"loss": 0.2497,
"step": 650
},
{
"epoch": 0.5790108564535585,
"grad_norm": 0.6230038404464722,
"learning_rate": 1.4992175273865417e-05,
"loss": 0.1408,
"step": 660
},
{
"epoch": 0.5877837482180064,
"grad_norm": 0.14638830721378326,
"learning_rate": 1.4679186228482005e-05,
"loss": 0.2073,
"step": 670
},
{
"epoch": 0.5965566399824542,
"grad_norm": 11.391288757324219,
"learning_rate": 1.4366197183098594e-05,
"loss": 0.2668,
"step": 680
},
{
"epoch": 0.6053295317469021,
"grad_norm": 21.246145248413086,
"learning_rate": 1.4053208137715182e-05,
"loss": 0.157,
"step": 690
},
{
"epoch": 0.6141024235113499,
"grad_norm": 9.763635635375977,
"learning_rate": 1.374021909233177e-05,
"loss": 0.2703,
"step": 700
},
{
"epoch": 0.6228753152757978,
"grad_norm": 5.701905727386475,
"learning_rate": 1.342723004694836e-05,
"loss": 0.1502,
"step": 710
},
{
"epoch": 0.6316482070402456,
"grad_norm": 15.160579681396484,
"learning_rate": 1.3114241001564947e-05,
"loss": 0.1193,
"step": 720
},
{
"epoch": 0.6404210988046934,
"grad_norm": 0.9100450873374939,
"learning_rate": 1.2801251956181535e-05,
"loss": 0.1526,
"step": 730
},
{
"epoch": 0.6491939905691414,
"grad_norm": 1.2482988834381104,
"learning_rate": 1.2488262910798124e-05,
"loss": 0.0877,
"step": 740
},
{
"epoch": 0.6579668823335892,
"grad_norm": 33.362972259521484,
"learning_rate": 1.2175273865414712e-05,
"loss": 0.2326,
"step": 750
},
{
"epoch": 0.6667397740980371,
"grad_norm": 18.053966522216797,
"learning_rate": 1.18622848200313e-05,
"loss": 0.1339,
"step": 760
},
{
"epoch": 0.6755126658624849,
"grad_norm": 0.13135869801044464,
"learning_rate": 1.1549295774647888e-05,
"loss": 0.111,
"step": 770
},
{
"epoch": 0.6842855576269328,
"grad_norm": 0.08466053754091263,
"learning_rate": 1.1236306729264477e-05,
"loss": 0.1719,
"step": 780
},
{
"epoch": 0.6930584493913806,
"grad_norm": 10.705062866210938,
"learning_rate": 1.0923317683881065e-05,
"loss": 0.271,
"step": 790
},
{
"epoch": 0.7018313411558285,
"grad_norm": 21.25899314880371,
"learning_rate": 1.0610328638497653e-05,
"loss": 0.0995,
"step": 800
},
{
"epoch": 0.7106042329202763,
"grad_norm": 4.999286651611328,
"learning_rate": 1.0297339593114242e-05,
"loss": 0.0837,
"step": 810
},
{
"epoch": 0.7193771246847243,
"grad_norm": 8.578291893005371,
"learning_rate": 9.98435054773083e-06,
"loss": 0.1303,
"step": 820
},
{
"epoch": 0.7281500164491721,
"grad_norm": 14.489535331726074,
"learning_rate": 9.671361502347418e-06,
"loss": 0.0931,
"step": 830
},
{
"epoch": 0.7369229082136199,
"grad_norm": 0.21449220180511475,
"learning_rate": 9.358372456964007e-06,
"loss": 0.151,
"step": 840
},
{
"epoch": 0.7456957999780678,
"grad_norm": 6.524932384490967,
"learning_rate": 9.045383411580595e-06,
"loss": 0.0509,
"step": 850
},
{
"epoch": 0.7544686917425156,
"grad_norm": 12.212717056274414,
"learning_rate": 8.732394366197183e-06,
"loss": 0.2197,
"step": 860
},
{
"epoch": 0.7632415835069635,
"grad_norm": 7.826491832733154,
"learning_rate": 8.419405320813773e-06,
"loss": 0.1044,
"step": 870
},
{
"epoch": 0.7720144752714113,
"grad_norm": 0.8386934995651245,
"learning_rate": 8.10641627543036e-06,
"loss": 0.1518,
"step": 880
},
{
"epoch": 0.7807873670358592,
"grad_norm": 0.19109545648097992,
"learning_rate": 7.79342723004695e-06,
"loss": 0.1311,
"step": 890
},
{
"epoch": 0.789560258800307,
"grad_norm": 1.3321959972381592,
"learning_rate": 7.480438184663538e-06,
"loss": 0.0714,
"step": 900
},
{
"epoch": 0.798333150564755,
"grad_norm": 8.981656074523926,
"learning_rate": 7.167449139280126e-06,
"loss": 0.1203,
"step": 910
},
{
"epoch": 0.8071060423292028,
"grad_norm": 0.2177647352218628,
"learning_rate": 6.854460093896714e-06,
"loss": 0.087,
"step": 920
},
{
"epoch": 0.8158789340936506,
"grad_norm": 1.4992311000823975,
"learning_rate": 6.541471048513303e-06,
"loss": 0.1357,
"step": 930
},
{
"epoch": 0.8246518258580985,
"grad_norm": 2.294389486312866,
"learning_rate": 6.228482003129891e-06,
"loss": 0.0661,
"step": 940
},
{
"epoch": 0.8334247176225463,
"grad_norm": 0.09688606858253479,
"learning_rate": 5.915492957746479e-06,
"loss": 0.0544,
"step": 950
},
{
"epoch": 0.8421976093869942,
"grad_norm": 0.15929299592971802,
"learning_rate": 5.602503912363068e-06,
"loss": 0.07,
"step": 960
},
{
"epoch": 0.850970501151442,
"grad_norm": 0.24913333356380463,
"learning_rate": 5.289514866979656e-06,
"loss": 0.0736,
"step": 970
},
{
"epoch": 0.8597433929158899,
"grad_norm": 23.65605926513672,
"learning_rate": 4.976525821596244e-06,
"loss": 0.0595,
"step": 980
},
{
"epoch": 0.8685162846803377,
"grad_norm": 14.373787879943848,
"learning_rate": 4.663536776212833e-06,
"loss": 0.1203,
"step": 990
},
{
"epoch": 0.8772891764447857,
"grad_norm": 1.452081322669983,
"learning_rate": 4.350547730829422e-06,
"loss": 0.0877,
"step": 1000
},
{
"epoch": 0.8772891764447857,
"eval_accuracy": 0.9584635998213488,
"eval_f1": 0.951232302045097,
"eval_loss": 0.1738564521074295,
"eval_precision": 0.9245667686034659,
"eval_recall": 0.9794816414686826,
"eval_runtime": 479.7572,
"eval_samples_per_second": 4.667,
"eval_steps_per_second": 0.584,
"step": 1000
},
{
"epoch": 0.8860620682092335,
"grad_norm": 7.37061882019043,
"learning_rate": 4.0375586854460095e-06,
"loss": 0.0067,
"step": 1010
},
{
"epoch": 0.8948349599736813,
"grad_norm": 2.164485216140747,
"learning_rate": 3.724569640062598e-06,
"loss": 0.0256,
"step": 1020
},
{
"epoch": 0.9036078517381292,
"grad_norm": 20.99627685546875,
"learning_rate": 3.4115805946791864e-06,
"loss": 0.1531,
"step": 1030
},
{
"epoch": 0.912380743502577,
"grad_norm": 10.877192497253418,
"learning_rate": 3.0985915492957746e-06,
"loss": 0.1619,
"step": 1040
},
{
"epoch": 0.9211536352670249,
"grad_norm": 0.06543366611003876,
"learning_rate": 2.7856025039123637e-06,
"loss": 0.0532,
"step": 1050
},
{
"epoch": 0.9299265270314727,
"grad_norm": 1.2488692998886108,
"learning_rate": 2.4726134585289515e-06,
"loss": 0.0916,
"step": 1060
},
{
"epoch": 0.9386994187959206,
"grad_norm": 0.08849219232797623,
"learning_rate": 2.15962441314554e-06,
"loss": 0.0663,
"step": 1070
},
{
"epoch": 0.9474723105603685,
"grad_norm": 82.8930892944336,
"learning_rate": 1.8466353677621286e-06,
"loss": 0.1457,
"step": 1080
},
{
"epoch": 0.9562452023248164,
"grad_norm": 11.937994956970215,
"learning_rate": 1.5336463223787168e-06,
"loss": 0.0481,
"step": 1090
},
{
"epoch": 0.9650180940892642,
"grad_norm": 45.752872467041016,
"learning_rate": 1.2206572769953053e-06,
"loss": 0.1291,
"step": 1100
},
{
"epoch": 0.973790985853712,
"grad_norm": 0.06413407623767853,
"learning_rate": 9.076682316118937e-07,
"loss": 0.0267,
"step": 1110
},
{
"epoch": 0.9825638776181599,
"grad_norm": 11.743976593017578,
"learning_rate": 5.94679186228482e-07,
"loss": 0.1039,
"step": 1120
},
{
"epoch": 0.9913367693826077,
"grad_norm": 1.0850673913955688,
"learning_rate": 2.8169014084507043e-07,
"loss": 0.0433,
"step": 1130
},
{
"epoch": 0.9992323719706108,
"step": 1139,
"total_flos": 1.186312889546834e+17,
"train_loss": 0.3326558042530222,
"train_runtime": 10607.6074,
"train_samples_per_second": 1.719,
"train_steps_per_second": 0.107
}
],
"logging_steps": 10,
"max_steps": 1139,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.186312889546834e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}