{
"best_metric": 32.61260692528645,
"best_model_checkpoint": "./whisper-lora-15k-adapters/checkpoint-2500",
"epoch": 3.51288056206089,
"eval_steps": 500,
"global_step": 3000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02927400468384075,
"grad_norm": 0.49275097250938416,
"learning_rate": 0.0005,
"loss": 0.9988,
"step": 25
},
{
"epoch": 0.0585480093676815,
"grad_norm": 0.5272337794303894,
"learning_rate": 0.001,
"loss": 0.754,
"step": 50
},
{
"epoch": 0.08782201405152225,
"grad_norm": 0.4221147298812866,
"learning_rate": 0.0009925727866904337,
"loss": 0.6045,
"step": 75
},
{
"epoch": 0.117096018735363,
"grad_norm": 0.30049410462379456,
"learning_rate": 0.0009851455733808675,
"loss": 0.6137,
"step": 100
},
{
"epoch": 0.14637002341920374,
"grad_norm": 0.34277957677841187,
"learning_rate": 0.0009777183600713012,
"loss": 0.6348,
"step": 125
},
{
"epoch": 0.1756440281030445,
"grad_norm": 0.8166279196739197,
"learning_rate": 0.000970291146761735,
"loss": 0.6599,
"step": 150
},
{
"epoch": 0.20491803278688525,
"grad_norm": 0.5421469211578369,
"learning_rate": 0.0009628639334521688,
"loss": 0.6297,
"step": 175
},
{
"epoch": 0.234192037470726,
"grad_norm": 0.5719662308692932,
"learning_rate": 0.0009554367201426025,
"loss": 0.6566,
"step": 200
},
{
"epoch": 0.26346604215456676,
"grad_norm": 0.4597041606903076,
"learning_rate": 0.0009480095068330362,
"loss": 0.6108,
"step": 225
},
{
"epoch": 0.2927400468384075,
"grad_norm": 0.303480863571167,
"learning_rate": 0.00094058229352347,
"loss": 0.5868,
"step": 250
},
{
"epoch": 0.32201405152224827,
"grad_norm": 0.35486990213394165,
"learning_rate": 0.0009331550802139037,
"loss": 0.6076,
"step": 275
},
{
"epoch": 0.351288056206089,
"grad_norm": 0.5772029161453247,
"learning_rate": 0.0009257278669043375,
"loss": 0.6243,
"step": 300
},
{
"epoch": 0.3805620608899297,
"grad_norm": 0.32380449771881104,
"learning_rate": 0.0009183006535947712,
"loss": 0.6142,
"step": 325
},
{
"epoch": 0.4098360655737705,
"grad_norm": 0.2743474245071411,
"learning_rate": 0.000910873440285205,
"loss": 0.5561,
"step": 350
},
{
"epoch": 0.43911007025761123,
"grad_norm": 0.4696587026119232,
"learning_rate": 0.0009034462269756387,
"loss": 0.6096,
"step": 375
},
{
"epoch": 0.468384074941452,
"grad_norm": 0.3656092584133148,
"learning_rate": 0.0008960190136660726,
"loss": 0.6609,
"step": 400
},
{
"epoch": 0.49765807962529274,
"grad_norm": 0.704386293888092,
"learning_rate": 0.0008885918003565062,
"loss": 0.565,
"step": 425
},
{
"epoch": 0.5269320843091335,
"grad_norm": 0.6060842871665955,
"learning_rate": 0.0008811645870469401,
"loss": 0.6572,
"step": 450
},
{
"epoch": 0.5562060889929742,
"grad_norm": 0.4069805443286896,
"learning_rate": 0.0008737373737373737,
"loss": 0.557,
"step": 475
},
{
"epoch": 0.585480093676815,
"grad_norm": 0.45368218421936035,
"learning_rate": 0.0008663101604278076,
"loss": 0.5793,
"step": 500
},
{
"epoch": 0.585480093676815,
"eval_loss": 0.546061635017395,
"eval_runtime": 12387.0635,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 37.98093560260484,
"step": 500
},
{
"epoch": 0.6147540983606558,
"grad_norm": 0.5809288620948792,
"learning_rate": 0.0008588829471182412,
"loss": 0.5512,
"step": 525
},
{
"epoch": 0.6440281030444965,
"grad_norm": 0.9479708671569824,
"learning_rate": 0.000851455733808675,
"loss": 0.6098,
"step": 550
},
{
"epoch": 0.6733021077283372,
"grad_norm": 0.38643014430999756,
"learning_rate": 0.0008440285204991087,
"loss": 0.5915,
"step": 575
},
{
"epoch": 0.702576112412178,
"grad_norm": 0.5177704095840454,
"learning_rate": 0.0008366013071895425,
"loss": 0.5909,
"step": 600
},
{
"epoch": 0.7318501170960188,
"grad_norm": 0.39607977867126465,
"learning_rate": 0.0008291740938799762,
"loss": 0.5783,
"step": 625
},
{
"epoch": 0.7611241217798594,
"grad_norm": 0.5243889689445496,
"learning_rate": 0.00082174688057041,
"loss": 0.5573,
"step": 650
},
{
"epoch": 0.7903981264637002,
"grad_norm": 0.38120409846305847,
"learning_rate": 0.0008143196672608437,
"loss": 0.6463,
"step": 675
},
{
"epoch": 0.819672131147541,
"grad_norm": 0.3815406858921051,
"learning_rate": 0.0008068924539512775,
"loss": 0.6244,
"step": 700
},
{
"epoch": 0.8489461358313818,
"grad_norm": 0.49876636266708374,
"learning_rate": 0.0007994652406417113,
"loss": 0.6347,
"step": 725
},
{
"epoch": 0.8782201405152225,
"grad_norm": 0.36918649077415466,
"learning_rate": 0.000792038027332145,
"loss": 0.5391,
"step": 750
},
{
"epoch": 0.9074941451990632,
"grad_norm": 0.4347202479839325,
"learning_rate": 0.0007846108140225788,
"loss": 0.6166,
"step": 775
},
{
"epoch": 0.936768149882904,
"grad_norm": 0.4877653419971466,
"learning_rate": 0.0007771836007130125,
"loss": 0.5318,
"step": 800
},
{
"epoch": 0.9660421545667447,
"grad_norm": 0.40555697679519653,
"learning_rate": 0.0007697563874034463,
"loss": 0.5867,
"step": 825
},
{
"epoch": 0.9953161592505855,
"grad_norm": 0.47605931758880615,
"learning_rate": 0.00076232917409388,
"loss": 0.578,
"step": 850
},
{
"epoch": 1.0245901639344261,
"grad_norm": 0.43946486711502075,
"learning_rate": 0.0007549019607843137,
"loss": 0.4755,
"step": 875
},
{
"epoch": 1.053864168618267,
"grad_norm": 0.3787698745727539,
"learning_rate": 0.0007474747474747475,
"loss": 0.554,
"step": 900
},
{
"epoch": 1.0831381733021077,
"grad_norm": 0.41880446672439575,
"learning_rate": 0.0007400475341651812,
"loss": 0.4911,
"step": 925
},
{
"epoch": 1.1124121779859484,
"grad_norm": 0.4066482484340668,
"learning_rate": 0.000732620320855615,
"loss": 0.5064,
"step": 950
},
{
"epoch": 1.1416861826697893,
"grad_norm": 0.2495754361152649,
"learning_rate": 0.0007251931075460487,
"loss": 0.5102,
"step": 975
},
{
"epoch": 1.17096018735363,
"grad_norm": 0.44539883732795715,
"learning_rate": 0.0007177658942364825,
"loss": 0.4371,
"step": 1000
},
{
"epoch": 1.17096018735363,
"eval_loss": 0.5167025923728943,
"eval_runtime": 12236.3244,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 34.90263315191745,
"step": 1000
},
{
"epoch": 1.2002341920374708,
"grad_norm": 0.4373762905597687,
"learning_rate": 0.0007103386809269162,
"loss": 0.5205,
"step": 1025
},
{
"epoch": 1.2295081967213115,
"grad_norm": 0.4179486632347107,
"learning_rate": 0.0007029114676173501,
"loss": 0.5313,
"step": 1050
},
{
"epoch": 1.2587822014051522,
"grad_norm": 0.45639654994010925,
"learning_rate": 0.0006954842543077837,
"loss": 0.5617,
"step": 1075
},
{
"epoch": 1.288056206088993,
"grad_norm": 0.3721451461315155,
"learning_rate": 0.0006880570409982176,
"loss": 0.4312,
"step": 1100
},
{
"epoch": 1.3173302107728337,
"grad_norm": 0.5349363088607788,
"learning_rate": 0.0006806298276886512,
"loss": 0.4598,
"step": 1125
},
{
"epoch": 1.3466042154566744,
"grad_norm": 0.5650537610054016,
"learning_rate": 0.0006732026143790851,
"loss": 0.5108,
"step": 1150
},
{
"epoch": 1.3758782201405153,
"grad_norm": 0.39053699374198914,
"learning_rate": 0.0006657754010695187,
"loss": 0.545,
"step": 1175
},
{
"epoch": 1.405152224824356,
"grad_norm": 0.38576140999794006,
"learning_rate": 0.0006583481877599526,
"loss": 0.5929,
"step": 1200
},
{
"epoch": 1.4344262295081966,
"grad_norm": 0.5037420988082886,
"learning_rate": 0.0006509209744503862,
"loss": 0.5336,
"step": 1225
},
{
"epoch": 1.4637002341920375,
"grad_norm": 0.48775750398635864,
"learning_rate": 0.00064349376114082,
"loss": 0.5012,
"step": 1250
},
{
"epoch": 1.4929742388758782,
"grad_norm": 0.27323758602142334,
"learning_rate": 0.0006360665478312537,
"loss": 0.529,
"step": 1275
},
{
"epoch": 1.5222482435597189,
"grad_norm": 0.44582176208496094,
"learning_rate": 0.0006286393345216874,
"loss": 0.4993,
"step": 1300
},
{
"epoch": 1.5515222482435598,
"grad_norm": 0.4263412654399872,
"learning_rate": 0.0006212121212121212,
"loss": 0.5914,
"step": 1325
},
{
"epoch": 1.5807962529274004,
"grad_norm": 0.43889227509498596,
"learning_rate": 0.0006137849079025549,
"loss": 0.5004,
"step": 1350
},
{
"epoch": 1.6100702576112411,
"grad_norm": 0.4256519377231598,
"learning_rate": 0.0006063576945929888,
"loss": 0.5172,
"step": 1375
},
{
"epoch": 1.639344262295082,
"grad_norm": 0.5018269419670105,
"learning_rate": 0.0005989304812834224,
"loss": 0.4943,
"step": 1400
},
{
"epoch": 1.6686182669789227,
"grad_norm": 0.3621992766857147,
"learning_rate": 0.0005915032679738563,
"loss": 0.5243,
"step": 1425
},
{
"epoch": 1.6978922716627634,
"grad_norm": 0.33811846375465393,
"learning_rate": 0.0005840760546642899,
"loss": 0.5376,
"step": 1450
},
{
"epoch": 1.7271662763466042,
"grad_norm": 0.4339434802532196,
"learning_rate": 0.0005766488413547238,
"loss": 0.5507,
"step": 1475
},
{
"epoch": 1.756440281030445,
"grad_norm": 0.42697080969810486,
"learning_rate": 0.0005692216280451574,
"loss": 0.4969,
"step": 1500
},
{
"epoch": 1.756440281030445,
"eval_loss": 0.467955082654953,
"eval_runtime": 12364.0154,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 35.736752191336834,
"step": 1500
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.48440080881118774,
"learning_rate": 0.0005617944147355913,
"loss": 0.5879,
"step": 1525
},
{
"epoch": 1.8149882903981265,
"grad_norm": 0.5575680732727051,
"learning_rate": 0.0005543672014260249,
"loss": 0.4912,
"step": 1550
},
{
"epoch": 1.8442622950819674,
"grad_norm": 0.5282599329948425,
"learning_rate": 0.0005469399881164587,
"loss": 0.462,
"step": 1575
},
{
"epoch": 1.8735362997658078,
"grad_norm": 0.5473257899284363,
"learning_rate": 0.0005395127748068924,
"loss": 0.595,
"step": 1600
},
{
"epoch": 1.9028103044496487,
"grad_norm": 0.38413456082344055,
"learning_rate": 0.0005320855614973262,
"loss": 0.4465,
"step": 1625
},
{
"epoch": 1.9320843091334896,
"grad_norm": 0.3802899420261383,
"learning_rate": 0.0005246583481877599,
"loss": 0.5235,
"step": 1650
},
{
"epoch": 1.96135831381733,
"grad_norm": 0.3901960551738739,
"learning_rate": 0.0005172311348781937,
"loss": 0.5095,
"step": 1675
},
{
"epoch": 1.990632318501171,
"grad_norm": 0.3652135133743286,
"learning_rate": 0.0005098039215686275,
"loss": 0.4862,
"step": 1700
},
{
"epoch": 2.019906323185012,
"grad_norm": 0.44296035170555115,
"learning_rate": 0.0005023767082590612,
"loss": 0.4718,
"step": 1725
},
{
"epoch": 2.0491803278688523,
"grad_norm": 0.38306355476379395,
"learning_rate": 0.000494949494949495,
"loss": 0.4404,
"step": 1750
},
{
"epoch": 2.078454332552693,
"grad_norm": 0.38407984375953674,
"learning_rate": 0.0004875222816399287,
"loss": 0.4578,
"step": 1775
},
{
"epoch": 2.107728337236534,
"grad_norm": 0.36647218465805054,
"learning_rate": 0.00048009506833036246,
"loss": 0.4596,
"step": 1800
},
{
"epoch": 2.1370023419203745,
"grad_norm": 0.44638949632644653,
"learning_rate": 0.0004726678550207962,
"loss": 0.4993,
"step": 1825
},
{
"epoch": 2.1662763466042154,
"grad_norm": 0.47006988525390625,
"learning_rate": 0.00046524064171123,
"loss": 0.4288,
"step": 1850
},
{
"epoch": 2.1955503512880563,
"grad_norm": 0.5148488283157349,
"learning_rate": 0.0004578134284016637,
"loss": 0.447,
"step": 1875
},
{
"epoch": 2.2248243559718968,
"grad_norm": 0.3988969326019287,
"learning_rate": 0.00045038621509209745,
"loss": 0.4559,
"step": 1900
},
{
"epoch": 2.2540983606557377,
"grad_norm": 0.45835059881210327,
"learning_rate": 0.0004429590017825312,
"loss": 0.4508,
"step": 1925
},
{
"epoch": 2.2833723653395785,
"grad_norm": 0.5815873742103577,
"learning_rate": 0.00043553178847296494,
"loss": 0.5091,
"step": 1950
},
{
"epoch": 2.312646370023419,
"grad_norm": 0.41291117668151855,
"learning_rate": 0.0004281045751633987,
"loss": 0.4559,
"step": 1975
},
{
"epoch": 2.34192037470726,
"grad_norm": 0.4919784367084503,
"learning_rate": 0.00042067736185383243,
"loss": 0.4465,
"step": 2000
},
{
"epoch": 2.34192037470726,
"eval_loss": 0.45896556973457336,
"eval_runtime": 12387.8081,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 33.32109271470311,
"step": 2000
},
{
"epoch": 2.371194379391101,
"grad_norm": 0.5504110455513,
"learning_rate": 0.0004132501485442662,
"loss": 0.4836,
"step": 2025
},
{
"epoch": 2.4004683840749417,
"grad_norm": 0.42781633138656616,
"learning_rate": 0.0004058229352346999,
"loss": 0.4341,
"step": 2050
},
{
"epoch": 2.429742388758782,
"grad_norm": 0.586798906326294,
"learning_rate": 0.00039839572192513367,
"loss": 0.435,
"step": 2075
},
{
"epoch": 2.459016393442623,
"grad_norm": 0.43749555945396423,
"learning_rate": 0.0003909685086155674,
"loss": 0.4788,
"step": 2100
},
{
"epoch": 2.4882903981264635,
"grad_norm": 0.4740693271160126,
"learning_rate": 0.00038354129530600116,
"loss": 0.4561,
"step": 2125
},
{
"epoch": 2.5175644028103044,
"grad_norm": 0.39487144351005554,
"learning_rate": 0.0003761140819964349,
"loss": 0.4764,
"step": 2150
},
{
"epoch": 2.5468384074941453,
"grad_norm": 0.4164140224456787,
"learning_rate": 0.0003686868686868687,
"loss": 0.4176,
"step": 2175
},
{
"epoch": 2.576112412177986,
"grad_norm": 0.43643561005592346,
"learning_rate": 0.00036125965537730245,
"loss": 0.4034,
"step": 2200
},
{
"epoch": 2.6053864168618266,
"grad_norm": 0.4501596689224243,
"learning_rate": 0.0003538324420677362,
"loss": 0.4369,
"step": 2225
},
{
"epoch": 2.6346604215456675,
"grad_norm": 0.40837183594703674,
"learning_rate": 0.00034640522875816995,
"loss": 0.4488,
"step": 2250
},
{
"epoch": 2.663934426229508,
"grad_norm": 0.3215262293815613,
"learning_rate": 0.0003389780154486037,
"loss": 0.4407,
"step": 2275
},
{
"epoch": 2.693208430913349,
"grad_norm": 0.4755331575870514,
"learning_rate": 0.00033155080213903744,
"loss": 0.4764,
"step": 2300
},
{
"epoch": 2.7224824355971897,
"grad_norm": 0.36339104175567627,
"learning_rate": 0.0003241235888294712,
"loss": 0.4749,
"step": 2325
},
{
"epoch": 2.7517564402810306,
"grad_norm": 0.5450248718261719,
"learning_rate": 0.00031669637551990493,
"loss": 0.4346,
"step": 2350
},
{
"epoch": 2.781030444964871,
"grad_norm": 0.40659910440444946,
"learning_rate": 0.0003092691622103387,
"loss": 0.4816,
"step": 2375
},
{
"epoch": 2.810304449648712,
"grad_norm": 0.3632465898990631,
"learning_rate": 0.0003018419489007724,
"loss": 0.434,
"step": 2400
},
{
"epoch": 2.839578454332553,
"grad_norm": 0.4366215467453003,
"learning_rate": 0.00029441473559120617,
"loss": 0.4002,
"step": 2425
},
{
"epoch": 2.8688524590163933,
"grad_norm": 0.31405115127563477,
"learning_rate": 0.0002869875222816399,
"loss": 0.4171,
"step": 2450
},
{
"epoch": 2.898126463700234,
"grad_norm": 0.3219514489173889,
"learning_rate": 0.00027956030897207366,
"loss": 0.4582,
"step": 2475
},
{
"epoch": 2.927400468384075,
"grad_norm": 0.3368944227695465,
"learning_rate": 0.00027213309566250746,
"loss": 0.4366,
"step": 2500
},
{
"epoch": 2.927400468384075,
"eval_loss": 0.41895976662635803,
"eval_runtime": 12350.2312,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 32.61260692528645,
"step": 2500
},
{
"epoch": 2.9566744730679155,
"grad_norm": 0.6570879220962524,
"learning_rate": 0.0002647058823529412,
"loss": 0.4611,
"step": 2525
},
{
"epoch": 2.9859484777517564,
"grad_norm": 0.4216877818107605,
"learning_rate": 0.00025727866904337495,
"loss": 0.4748,
"step": 2550
},
{
"epoch": 3.0152224824355973,
"grad_norm": 0.46946263313293457,
"learning_rate": 0.0002498514557338087,
"loss": 0.4575,
"step": 2575
},
{
"epoch": 3.0444964871194378,
"grad_norm": 0.396932989358902,
"learning_rate": 0.00024242424242424245,
"loss": 0.3939,
"step": 2600
},
{
"epoch": 3.0737704918032787,
"grad_norm": 0.4928429126739502,
"learning_rate": 0.0002349970291146762,
"loss": 0.3568,
"step": 2625
},
{
"epoch": 3.1030444964871196,
"grad_norm": 0.4007248878479004,
"learning_rate": 0.00022756981580510994,
"loss": 0.3692,
"step": 2650
},
{
"epoch": 3.13231850117096,
"grad_norm": 0.40141820907592773,
"learning_rate": 0.00022014260249554368,
"loss": 0.3905,
"step": 2675
},
{
"epoch": 3.161592505854801,
"grad_norm": 0.37623295187950134,
"learning_rate": 0.00021271538918597743,
"loss": 0.3566,
"step": 2700
},
{
"epoch": 3.190866510538642,
"grad_norm": 0.3837551772594452,
"learning_rate": 0.00020528817587641118,
"loss": 0.432,
"step": 2725
},
{
"epoch": 3.2201405152224822,
"grad_norm": 0.6860864758491516,
"learning_rate": 0.00019786096256684495,
"loss": 0.4528,
"step": 2750
},
{
"epoch": 3.249414519906323,
"grad_norm": 0.4655303657054901,
"learning_rate": 0.00019043374925727867,
"loss": 0.3674,
"step": 2775
},
{
"epoch": 3.278688524590164,
"grad_norm": 0.43825146555900574,
"learning_rate": 0.00018300653594771241,
"loss": 0.3758,
"step": 2800
},
{
"epoch": 3.307962529274005,
"grad_norm": 0.5083994269371033,
"learning_rate": 0.00017557932263814616,
"loss": 0.3824,
"step": 2825
},
{
"epoch": 3.3372365339578454,
"grad_norm": 0.5229085683822632,
"learning_rate": 0.0001681521093285799,
"loss": 0.3969,
"step": 2850
},
{
"epoch": 3.3665105386416863,
"grad_norm": 0.584431529045105,
"learning_rate": 0.00016072489601901365,
"loss": 0.4285,
"step": 2875
},
{
"epoch": 3.3957845433255267,
"grad_norm": 0.4564405679702759,
"learning_rate": 0.0001532976827094474,
"loss": 0.4189,
"step": 2900
},
{
"epoch": 3.4250585480093676,
"grad_norm": 0.34044119715690613,
"learning_rate": 0.00014587046939988117,
"loss": 0.4008,
"step": 2925
},
{
"epoch": 3.4543325526932085,
"grad_norm": 0.34367942810058594,
"learning_rate": 0.00013844325609031492,
"loss": 0.3847,
"step": 2950
},
{
"epoch": 3.4836065573770494,
"grad_norm": 0.31558510661125183,
"learning_rate": 0.00013101604278074866,
"loss": 0.3474,
"step": 2975
},
{
"epoch": 3.51288056206089,
"grad_norm": 0.46547606587409973,
"learning_rate": 0.0001235888294711824,
"loss": 0.3753,
"step": 3000
},
{
"epoch": 3.51288056206089,
"eval_loss": 0.4121188521385193,
"eval_runtime": 12280.3025,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 32.636280420441274,
"step": 3000
}
],
"logging_steps": 25,
"max_steps": 3416,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.964133371904e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}