{
  "best_metric": 33.32109271470311,
  "best_model_checkpoint": "./whisper-lora-15k-adapters/checkpoint-2000",
  "epoch": 2.34192037470726,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02927400468384075,
      "grad_norm": 0.49275097250938416,
      "learning_rate": 0.0005,
      "loss": 0.9988,
      "step": 25
    },
    {
      "epoch": 0.0585480093676815,
      "grad_norm": 0.5272337794303894,
      "learning_rate": 0.001,
      "loss": 0.754,
      "step": 50
    },
    {
      "epoch": 0.08782201405152225,
      "grad_norm": 0.4221147298812866,
      "learning_rate": 0.0009925727866904337,
      "loss": 0.6045,
      "step": 75
    },
    {
      "epoch": 0.117096018735363,
      "grad_norm": 0.30049410462379456,
      "learning_rate": 0.0009851455733808675,
      "loss": 0.6137,
      "step": 100
    },
    {
      "epoch": 0.14637002341920374,
      "grad_norm": 0.34277957677841187,
      "learning_rate": 0.0009777183600713012,
      "loss": 0.6348,
      "step": 125
    },
    {
      "epoch": 0.1756440281030445,
      "grad_norm": 0.8166279196739197,
      "learning_rate": 0.000970291146761735,
      "loss": 0.6599,
      "step": 150
    },
    {
      "epoch": 0.20491803278688525,
      "grad_norm": 0.5421469211578369,
      "learning_rate": 0.0009628639334521688,
      "loss": 0.6297,
      "step": 175
    },
    {
      "epoch": 0.234192037470726,
      "grad_norm": 0.5719662308692932,
      "learning_rate": 0.0009554367201426025,
      "loss": 0.6566,
      "step": 200
    },
    {
      "epoch": 0.26346604215456676,
      "grad_norm": 0.4597041606903076,
      "learning_rate": 0.0009480095068330362,
      "loss": 0.6108,
      "step": 225
    },
    {
      "epoch": 0.2927400468384075,
      "grad_norm": 0.303480863571167,
      "learning_rate": 0.00094058229352347,
      "loss": 0.5868,
      "step": 250
    },
    {
      "epoch": 0.32201405152224827,
      "grad_norm": 0.35486990213394165,
      "learning_rate": 0.0009331550802139037,
      "loss": 0.6076,
      "step": 275
    },
    {
      "epoch": 0.351288056206089,
      "grad_norm": 0.5772029161453247,
      "learning_rate": 0.0009257278669043375,
      "loss": 0.6243,
      "step": 300
    },
    {
      "epoch": 0.3805620608899297,
      "grad_norm": 0.32380449771881104,
      "learning_rate": 0.0009183006535947712,
      "loss": 0.6142,
      "step": 325
    },
    {
      "epoch": 0.4098360655737705,
      "grad_norm": 0.2743474245071411,
      "learning_rate": 0.000910873440285205,
      "loss": 0.5561,
      "step": 350
    },
    {
      "epoch": 0.43911007025761123,
      "grad_norm": 0.4696587026119232,
      "learning_rate": 0.0009034462269756387,
      "loss": 0.6096,
      "step": 375
    },
    {
      "epoch": 0.468384074941452,
      "grad_norm": 0.3656092584133148,
      "learning_rate": 0.0008960190136660726,
      "loss": 0.6609,
      "step": 400
    },
    {
      "epoch": 0.49765807962529274,
      "grad_norm": 0.704386293888092,
      "learning_rate": 0.0008885918003565062,
      "loss": 0.565,
      "step": 425
    },
    {
      "epoch": 0.5269320843091335,
      "grad_norm": 0.6060842871665955,
      "learning_rate": 0.0008811645870469401,
      "loss": 0.6572,
      "step": 450
    },
    {
      "epoch": 0.5562060889929742,
      "grad_norm": 0.4069805443286896,
      "learning_rate": 0.0008737373737373737,
      "loss": 0.557,
      "step": 475
    },
    {
      "epoch": 0.585480093676815,
      "grad_norm": 0.45368218421936035,
      "learning_rate": 0.0008663101604278076,
      "loss": 0.5793,
      "step": 500
    },
    {
      "epoch": 0.585480093676815,
      "eval_loss": 0.546061635017395,
      "eval_runtime": 12387.0635,
      "eval_samples_per_second": 0.123,
      "eval_steps_per_second": 0.008,
      "eval_wer": 37.98093560260484,
      "step": 500
    },
    {
      "epoch": 0.6147540983606558,
      "grad_norm": 0.5809288620948792,
      "learning_rate": 0.0008588829471182412,
      "loss": 0.5512,
      "step": 525
    },
    {
      "epoch": 0.6440281030444965,
      "grad_norm": 0.9479708671569824,
      "learning_rate": 0.000851455733808675,
      "loss": 0.6098,
      "step": 550
    },
    {
      "epoch": 0.6733021077283372,
      "grad_norm": 0.38643014430999756,
      "learning_rate": 0.0008440285204991087,
      "loss": 0.5915,
      "step": 575
    },
    {
      "epoch": 0.702576112412178,
      "grad_norm": 0.5177704095840454,
      "learning_rate": 0.0008366013071895425,
      "loss": 0.5909,
      "step": 600
    },
    {
      "epoch": 0.7318501170960188,
      "grad_norm": 0.39607977867126465,
      "learning_rate": 0.0008291740938799762,
      "loss": 0.5783,
      "step": 625
    },
    {
      "epoch": 0.7611241217798594,
      "grad_norm": 0.5243889689445496,
      "learning_rate": 0.00082174688057041,
      "loss": 0.5573,
      "step": 650
    },
    {
      "epoch": 0.7903981264637002,
      "grad_norm": 0.38120409846305847,
      "learning_rate": 0.0008143196672608437,
      "loss": 0.6463,
      "step": 675
    },
    {
      "epoch": 0.819672131147541,
      "grad_norm": 0.3815406858921051,
      "learning_rate": 0.0008068924539512775,
      "loss": 0.6244,
      "step": 700
    },
    {
      "epoch": 0.8489461358313818,
      "grad_norm": 0.49876636266708374,
      "learning_rate": 0.0007994652406417113,
      "loss": 0.6347,
      "step": 725
    },
    {
      "epoch": 0.8782201405152225,
      "grad_norm": 0.36918649077415466,
      "learning_rate": 0.000792038027332145,
      "loss": 0.5391,
      "step": 750
    },
    {
      "epoch": 0.9074941451990632,
      "grad_norm": 0.4347202479839325,
      "learning_rate": 0.0007846108140225788,
      "loss": 0.6166,
      "step": 775
    },
    {
      "epoch": 0.936768149882904,
      "grad_norm": 0.4877653419971466,
      "learning_rate": 0.0007771836007130125,
      "loss": 0.5318,
      "step": 800
    },
    {
      "epoch": 0.9660421545667447,
      "grad_norm": 0.40555697679519653,
      "learning_rate": 0.0007697563874034463,
      "loss": 0.5867,
      "step": 825
    },
    {
      "epoch": 0.9953161592505855,
      "grad_norm": 0.47605931758880615,
      "learning_rate": 0.00076232917409388,
      "loss": 0.578,
      "step": 850
    },
    {
      "epoch": 1.0245901639344261,
      "grad_norm": 0.43946486711502075,
      "learning_rate": 0.0007549019607843137,
      "loss": 0.4755,
      "step": 875
    },
    {
      "epoch": 1.053864168618267,
      "grad_norm": 0.3787698745727539,
      "learning_rate": 0.0007474747474747475,
      "loss": 0.554,
      "step": 900
    },
    {
      "epoch": 1.0831381733021077,
      "grad_norm": 0.41880446672439575,
      "learning_rate": 0.0007400475341651812,
      "loss": 0.4911,
      "step": 925
    },
    {
      "epoch": 1.1124121779859484,
      "grad_norm": 0.4066482484340668,
      "learning_rate": 0.000732620320855615,
      "loss": 0.5064,
      "step": 950
    },
    {
      "epoch": 1.1416861826697893,
      "grad_norm": 0.2495754361152649,
      "learning_rate": 0.0007251931075460487,
      "loss": 0.5102,
      "step": 975
    },
    {
      "epoch": 1.17096018735363,
      "grad_norm": 0.44539883732795715,
      "learning_rate": 0.0007177658942364825,
      "loss": 0.4371,
      "step": 1000
    },
    {
      "epoch": 1.17096018735363,
      "eval_loss": 0.5167025923728943,
      "eval_runtime": 12236.3244,
      "eval_samples_per_second": 0.124,
      "eval_steps_per_second": 0.008,
      "eval_wer": 34.90263315191745,
      "step": 1000
    },
    {
      "epoch": 1.2002341920374708,
      "grad_norm": 0.4373762905597687,
      "learning_rate": 0.0007103386809269162,
      "loss": 0.5205,
      "step": 1025
    },
    {
      "epoch": 1.2295081967213115,
      "grad_norm": 0.4179486632347107,
      "learning_rate": 0.0007029114676173501,
      "loss": 0.5313,
      "step": 1050
    },
    {
      "epoch": 1.2587822014051522,
      "grad_norm": 0.45639654994010925,
      "learning_rate": 0.0006954842543077837,
      "loss": 0.5617,
      "step": 1075
    },
    {
      "epoch": 1.288056206088993,
      "grad_norm": 0.3721451461315155,
      "learning_rate": 0.0006880570409982176,
      "loss": 0.4312,
      "step": 1100
    },
    {
      "epoch": 1.3173302107728337,
      "grad_norm": 0.5349363088607788,
      "learning_rate": 0.0006806298276886512,
      "loss": 0.4598,
      "step": 1125
    },
    {
      "epoch": 1.3466042154566744,
      "grad_norm": 0.5650537610054016,
      "learning_rate": 0.0006732026143790851,
      "loss": 0.5108,
      "step": 1150
    },
    {
      "epoch": 1.3758782201405153,
      "grad_norm": 0.39053699374198914,
      "learning_rate": 0.0006657754010695187,
      "loss": 0.545,
      "step": 1175
    },
    {
      "epoch": 1.405152224824356,
      "grad_norm": 0.38576140999794006,
      "learning_rate": 0.0006583481877599526,
      "loss": 0.5929,
      "step": 1200
    },
    {
      "epoch": 1.4344262295081966,
      "grad_norm": 0.5037420988082886,
      "learning_rate": 0.0006509209744503862,
      "loss": 0.5336,
      "step": 1225
    },
    {
      "epoch": 1.4637002341920375,
      "grad_norm": 0.48775750398635864,
      "learning_rate": 0.00064349376114082,
      "loss": 0.5012,
      "step": 1250
    },
    {
      "epoch": 1.4929742388758782,
      "grad_norm": 0.27323758602142334,
      "learning_rate": 0.0006360665478312537,
      "loss": 0.529,
      "step": 1275
    },
    {
      "epoch": 1.5222482435597189,
      "grad_norm": 0.44582176208496094,
      "learning_rate": 0.0006286393345216874,
      "loss": 0.4993,
      "step": 1300
    },
    {
      "epoch": 1.5515222482435598,
      "grad_norm": 0.4263412654399872,
      "learning_rate": 0.0006212121212121212,
      "loss": 0.5914,
      "step": 1325
    },
    {
      "epoch": 1.5807962529274004,
      "grad_norm": 0.43889227509498596,
      "learning_rate": 0.0006137849079025549,
      "loss": 0.5004,
      "step": 1350
    },
    {
      "epoch": 1.6100702576112411,
      "grad_norm": 0.4256519377231598,
      "learning_rate": 0.0006063576945929888,
      "loss": 0.5172,
      "step": 1375
    },
    {
      "epoch": 1.639344262295082,
      "grad_norm": 0.5018269419670105,
      "learning_rate": 0.0005989304812834224,
      "loss": 0.4943,
      "step": 1400
    },
    {
      "epoch": 1.6686182669789227,
      "grad_norm": 0.3621992766857147,
      "learning_rate": 0.0005915032679738563,
      "loss": 0.5243,
      "step": 1425
    },
    {
      "epoch": 1.6978922716627634,
      "grad_norm": 0.33811846375465393,
      "learning_rate": 0.0005840760546642899,
      "loss": 0.5376,
      "step": 1450
    },
    {
      "epoch": 1.7271662763466042,
      "grad_norm": 0.4339434802532196,
      "learning_rate": 0.0005766488413547238,
      "loss": 0.5507,
      "step": 1475
    },
    {
      "epoch": 1.756440281030445,
      "grad_norm": 0.42697080969810486,
      "learning_rate": 0.0005692216280451574,
      "loss": 0.4969,
      "step": 1500
    },
    {
      "epoch": 1.756440281030445,
      "eval_loss": 0.467955082654953,
      "eval_runtime": 12364.0154,
      "eval_samples_per_second": 0.123,
      "eval_steps_per_second": 0.008,
      "eval_wer": 35.736752191336834,
      "step": 1500
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 0.48440080881118774,
      "learning_rate": 0.0005617944147355913,
      "loss": 0.5879,
      "step": 1525
    },
    {
      "epoch": 1.8149882903981265,
      "grad_norm": 0.5575680732727051,
      "learning_rate": 0.0005543672014260249,
      "loss": 0.4912,
      "step": 1550
    },
    {
      "epoch": 1.8442622950819674,
      "grad_norm": 0.5282599329948425,
      "learning_rate": 0.0005469399881164587,
      "loss": 0.462,
      "step": 1575
    },
    {
      "epoch": 1.8735362997658078,
      "grad_norm": 0.5473257899284363,
      "learning_rate": 0.0005395127748068924,
      "loss": 0.595,
      "step": 1600
    },
    {
      "epoch": 1.9028103044496487,
      "grad_norm": 0.38413456082344055,
      "learning_rate": 0.0005320855614973262,
      "loss": 0.4465,
      "step": 1625
    },
    {
      "epoch": 1.9320843091334896,
      "grad_norm": 0.3802899420261383,
      "learning_rate": 0.0005246583481877599,
      "loss": 0.5235,
      "step": 1650
    },
    {
      "epoch": 1.96135831381733,
      "grad_norm": 0.3901960551738739,
      "learning_rate": 0.0005172311348781937,
      "loss": 0.5095,
      "step": 1675
    },
    {
      "epoch": 1.990632318501171,
      "grad_norm": 0.3652135133743286,
      "learning_rate": 0.0005098039215686275,
      "loss": 0.4862,
      "step": 1700
    },
    {
      "epoch": 2.019906323185012,
      "grad_norm": 0.44296035170555115,
      "learning_rate": 0.0005023767082590612,
      "loss": 0.4718,
      "step": 1725
    },
    {
      "epoch": 2.0491803278688523,
      "grad_norm": 0.38306355476379395,
      "learning_rate": 0.000494949494949495,
      "loss": 0.4404,
      "step": 1750
    },
    {
      "epoch": 2.078454332552693,
      "grad_norm": 0.38407984375953674,
      "learning_rate": 0.0004875222816399287,
      "loss": 0.4578,
      "step": 1775
    },
    {
      "epoch": 2.107728337236534,
      "grad_norm": 0.36647218465805054,
      "learning_rate": 0.00048009506833036246,
      "loss": 0.4596,
      "step": 1800
    },
    {
      "epoch": 2.1370023419203745,
      "grad_norm": 0.44638949632644653,
      "learning_rate": 0.0004726678550207962,
      "loss": 0.4993,
      "step": 1825
    },
    {
      "epoch": 2.1662763466042154,
      "grad_norm": 0.47006988525390625,
      "learning_rate": 0.00046524064171123,
      "loss": 0.4288,
      "step": 1850
    },
    {
      "epoch": 2.1955503512880563,
      "grad_norm": 0.5148488283157349,
      "learning_rate": 0.0004578134284016637,
      "loss": 0.447,
      "step": 1875
    },
    {
      "epoch": 2.2248243559718968,
      "grad_norm": 0.3988969326019287,
      "learning_rate": 0.00045038621509209745,
      "loss": 0.4559,
      "step": 1900
    },
    {
      "epoch": 2.2540983606557377,
      "grad_norm": 0.45835059881210327,
      "learning_rate": 0.0004429590017825312,
      "loss": 0.4508,
      "step": 1925
    },
    {
      "epoch": 2.2833723653395785,
      "grad_norm": 0.5815873742103577,
      "learning_rate": 0.00043553178847296494,
      "loss": 0.5091,
      "step": 1950
    },
    {
      "epoch": 2.312646370023419,
      "grad_norm": 0.41291117668151855,
      "learning_rate": 0.0004281045751633987,
      "loss": 0.4559,
      "step": 1975
    },
    {
      "epoch": 2.34192037470726,
      "grad_norm": 0.4919784367084503,
      "learning_rate": 0.00042067736185383243,
      "loss": 0.4465,
      "step": 2000
    },
    {
      "epoch": 2.34192037470726,
      "eval_loss": 0.45896556973457336,
      "eval_runtime": 12387.8081,
      "eval_samples_per_second": 0.123,
      "eval_steps_per_second": 0.008,
      "eval_wer": 33.32109271470311,
      "step": 2000
    }
  ],
  "logging_steps": 25,
  "max_steps": 3416,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.309422247936e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}