{
  "best_global_step": 1000,
  "best_metric": 0.7969154715538025,
  "best_model_checkpoint": "speecht5_finetuned_nepali/checkpoint-1000",
  "epoch": 2000.0,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 25.0,
      "grad_norm": 10.529245376586914,
      "learning_rate": 9.5e-06,
      "loss": 1.0362,
      "step": 25
    },
    {
      "epoch": 50.0,
      "grad_norm": 14.513840675354004,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.4845,
      "step": 50
    },
    {
      "epoch": 75.0,
      "grad_norm": 22.360095977783203,
      "learning_rate": 3.45e-05,
      "loss": 0.415,
      "step": 75
    },
    {
      "epoch": 100.0,
      "grad_norm": 3.0262670516967773,
      "learning_rate": 4.7e-05,
      "loss": 0.2888,
      "step": 100
    },
    {
      "epoch": 125.0,
      "grad_norm": 3.495602607727051,
      "learning_rate": 5.95e-05,
      "loss": 0.3118,
      "step": 125
    },
    {
      "epoch": 150.0,
      "grad_norm": 12.870991706848145,
      "learning_rate": 7.2e-05,
      "loss": 0.2475,
      "step": 150
    },
    {
      "epoch": 175.0,
      "grad_norm": 25.45779800415039,
      "learning_rate": 8.450000000000001e-05,
      "loss": 0.2191,
      "step": 175
    },
    {
      "epoch": 200.0,
      "grad_norm": 16.621234893798828,
      "learning_rate": 9.7e-05,
      "loss": 0.2357,
      "step": 200
    },
    {
      "epoch": 225.0,
      "grad_norm": 3.157877206802368,
      "learning_rate": 9.894444444444446e-05,
      "loss": 0.2045,
      "step": 225
    },
    {
      "epoch": 250.0,
      "grad_norm": 3.6257405281066895,
      "learning_rate": 9.755555555555555e-05,
      "loss": 0.2164,
      "step": 250
    },
    {
      "epoch": 275.0,
      "grad_norm": 3.596989870071411,
      "learning_rate": 9.616666666666667e-05,
      "loss": 0.2195,
      "step": 275
    },
    {
      "epoch": 300.0,
      "grad_norm": 9.64238166809082,
      "learning_rate": 9.477777777777779e-05,
      "loss": 0.2001,
      "step": 300
    },
    {
      "epoch": 325.0,
      "grad_norm": 2.2049977779388428,
      "learning_rate": 9.33888888888889e-05,
      "loss": 0.2109,
      "step": 325
    },
    {
      "epoch": 350.0,
      "grad_norm": 13.762038230895996,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.1858,
      "step": 350
    },
    {
      "epoch": 375.0,
      "grad_norm": 16.161832809448242,
      "learning_rate": 9.061111111111112e-05,
      "loss": 0.2076,
      "step": 375
    },
    {
      "epoch": 400.0,
      "grad_norm": 4.481337070465088,
      "learning_rate": 8.922222222222223e-05,
      "loss": 0.1771,
      "step": 400
    },
    {
      "epoch": 425.0,
      "grad_norm": 4.335459232330322,
      "learning_rate": 8.783333333333333e-05,
      "loss": 0.1854,
      "step": 425
    },
    {
      "epoch": 450.0,
      "grad_norm": 4.443661212921143,
      "learning_rate": 8.644444444444445e-05,
      "loss": 0.1635,
      "step": 450
    },
    {
      "epoch": 475.0,
      "grad_norm": 5.2760796546936035,
      "learning_rate": 8.505555555555556e-05,
      "loss": 0.1767,
      "step": 475
    },
    {
      "epoch": 500.0,
      "grad_norm": 13.874415397644043,
      "learning_rate": 8.366666666666668e-05,
      "loss": 0.1732,
      "step": 500
    },
    {
      "epoch": 500.0,
      "eval_loss": 0.8349313735961914,
      "eval_runtime": 0.0239,
      "eval_samples_per_second": 41.882,
      "eval_steps_per_second": 41.882,
      "step": 500
    },
    {
      "epoch": 525.0,
      "grad_norm": 7.144843101501465,
      "learning_rate": 8.227777777777778e-05,
      "loss": 0.1622,
      "step": 525
    },
    {
      "epoch": 550.0,
      "grad_norm": 9.63769245147705,
      "learning_rate": 8.088888888888889e-05,
      "loss": 0.1594,
      "step": 550
    },
    {
      "epoch": 575.0,
      "grad_norm": 18.466543197631836,
      "learning_rate": 7.950000000000001e-05,
      "loss": 0.1584,
      "step": 575
    },
    {
      "epoch": 600.0,
      "grad_norm": 4.807222843170166,
      "learning_rate": 7.811111111111111e-05,
      "loss": 0.1928,
      "step": 600
    },
    {
      "epoch": 625.0,
      "grad_norm": 7.026573657989502,
      "learning_rate": 7.672222222222223e-05,
      "loss": 0.1403,
      "step": 625
    },
    {
      "epoch": 650.0,
      "grad_norm": 1.3481919765472412,
      "learning_rate": 7.533333333333334e-05,
      "loss": 0.1454,
      "step": 650
    },
    {
      "epoch": 675.0,
      "grad_norm": 1.3561595678329468,
      "learning_rate": 7.394444444444445e-05,
      "loss": 0.128,
      "step": 675
    },
    {
      "epoch": 700.0,
      "grad_norm": 8.730093955993652,
      "learning_rate": 7.255555555555555e-05,
      "loss": 0.1306,
      "step": 700
    },
    {
      "epoch": 725.0,
      "grad_norm": 1.6442005634307861,
      "learning_rate": 7.116666666666667e-05,
      "loss": 0.1332,
      "step": 725
    },
    {
      "epoch": 750.0,
      "grad_norm": 2.5606157779693604,
      "learning_rate": 6.977777777777779e-05,
      "loss": 0.1289,
      "step": 750
    },
    {
      "epoch": 775.0,
      "grad_norm": 10.112384796142578,
      "learning_rate": 6.83888888888889e-05,
      "loss": 0.1354,
      "step": 775
    },
    {
      "epoch": 800.0,
      "grad_norm": 1.0964512825012207,
      "learning_rate": 6.7e-05,
      "loss": 0.1147,
      "step": 800
    },
    {
      "epoch": 825.0,
      "grad_norm": 1.3629651069641113,
      "learning_rate": 6.561111111111111e-05,
      "loss": 0.1129,
      "step": 825
    },
    {
      "epoch": 850.0,
      "grad_norm": 12.741216659545898,
      "learning_rate": 6.422222222222223e-05,
      "loss": 0.1098,
      "step": 850
    },
    {
      "epoch": 875.0,
      "grad_norm": 2.6955878734588623,
      "learning_rate": 6.283333333333333e-05,
      "loss": 0.1103,
      "step": 875
    },
    {
      "epoch": 900.0,
      "grad_norm": 1.8039289712905884,
      "learning_rate": 6.144444444444445e-05,
      "loss": 0.114,
      "step": 900
    },
    {
      "epoch": 925.0,
      "grad_norm": 2.290956735610962,
      "learning_rate": 6.005555555555555e-05,
      "loss": 0.1182,
      "step": 925
    },
    {
      "epoch": 950.0,
      "grad_norm": 0.9769994616508484,
      "learning_rate": 5.866666666666667e-05,
      "loss": 0.1236,
      "step": 950
    },
    {
      "epoch": 975.0,
      "grad_norm": 0.7922130227088928,
      "learning_rate": 5.7277777777777785e-05,
      "loss": 0.1029,
      "step": 975
    },
    {
      "epoch": 1000.0,
      "grad_norm": 0.9130666255950928,
      "learning_rate": 5.588888888888889e-05,
      "loss": 0.1032,
      "step": 1000
    },
    {
      "epoch": 1000.0,
      "eval_loss": 0.7969154715538025,
      "eval_runtime": 0.0205,
      "eval_samples_per_second": 48.798,
      "eval_steps_per_second": 48.798,
      "step": 1000
    },
    {
      "epoch": 1025.0,
      "grad_norm": 3.981950283050537,
      "learning_rate": 5.45e-05,
      "loss": 0.1481,
      "step": 1025
    },
    {
      "epoch": 1050.0,
      "grad_norm": 1.1825660467147827,
      "learning_rate": 5.311111111111111e-05,
      "loss": 0.104,
      "step": 1050
    },
    {
      "epoch": 1075.0,
      "grad_norm": 2.0291762351989746,
      "learning_rate": 5.172222222222223e-05,
      "loss": 0.1049,
      "step": 1075
    },
    {
      "epoch": 1100.0,
      "grad_norm": 0.8333075046539307,
      "learning_rate": 5.0333333333333335e-05,
      "loss": 0.0917,
      "step": 1100
    },
    {
      "epoch": 1125.0,
      "grad_norm": 19.428544998168945,
      "learning_rate": 4.894444444444445e-05,
      "loss": 0.1106,
      "step": 1125
    },
    {
      "epoch": 1150.0,
      "grad_norm": 1.1518399715423584,
      "learning_rate": 4.755555555555556e-05,
      "loss": 0.0911,
      "step": 1150
    },
    {
      "epoch": 1175.0,
      "grad_norm": 11.079072952270508,
      "learning_rate": 4.6166666666666666e-05,
      "loss": 0.0853,
      "step": 1175
    },
    {
      "epoch": 1200.0,
      "grad_norm": 3.140981435775757,
      "learning_rate": 4.477777777777778e-05,
      "loss": 0.1094,
      "step": 1200
    },
    {
      "epoch": 1225.0,
      "grad_norm": 1.3652321100234985,
      "learning_rate": 4.338888888888889e-05,
      "loss": 0.085,
      "step": 1225
    },
    {
      "epoch": 1250.0,
      "grad_norm": 4.102111339569092,
      "learning_rate": 4.2e-05,
      "loss": 0.0715,
      "step": 1250
    },
    {
      "epoch": 1275.0,
      "grad_norm": 0.7971464395523071,
      "learning_rate": 4.061111111111111e-05,
      "loss": 0.0995,
      "step": 1275
    },
    {
      "epoch": 1300.0,
      "grad_norm": 2.4831583499908447,
      "learning_rate": 3.922222222222223e-05,
      "loss": 0.0911,
      "step": 1300
    },
    {
      "epoch": 1325.0,
      "grad_norm": 0.8036356568336487,
      "learning_rate": 3.7833333333333336e-05,
      "loss": 0.0744,
      "step": 1325
    },
    {
      "epoch": 1350.0,
      "grad_norm": 0.7885045409202576,
      "learning_rate": 3.644444444444445e-05,
      "loss": 0.0819,
      "step": 1350
    },
    {
      "epoch": 1375.0,
      "grad_norm": 0.8334118127822876,
      "learning_rate": 3.505555555555556e-05,
      "loss": 0.0814,
      "step": 1375
    },
    {
      "epoch": 1400.0,
      "grad_norm": 3.9760286808013916,
      "learning_rate": 3.366666666666667e-05,
      "loss": 0.0787,
      "step": 1400
    },
    {
      "epoch": 1425.0,
      "grad_norm": 1.063104271888733,
      "learning_rate": 3.227777777777778e-05,
      "loss": 0.0922,
      "step": 1425
    },
    {
      "epoch": 1450.0,
      "grad_norm": 1.2303999662399292,
      "learning_rate": 3.088888888888889e-05,
      "loss": 0.0834,
      "step": 1450
    },
    {
      "epoch": 1475.0,
      "grad_norm": 3.905423164367676,
      "learning_rate": 2.95e-05,
      "loss": 0.0704,
      "step": 1475
    },
    {
      "epoch": 1500.0,
      "grad_norm": 0.5851226449012756,
      "learning_rate": 2.811111111111111e-05,
      "loss": 0.0938,
      "step": 1500
    },
    {
      "epoch": 1500.0,
      "eval_loss": 0.8552892804145813,
      "eval_runtime": 0.0192,
      "eval_samples_per_second": 52.216,
      "eval_steps_per_second": 52.216,
      "step": 1500
    },
    {
      "epoch": 1525.0,
      "grad_norm": 0.6819572448730469,
      "learning_rate": 2.6722222222222228e-05,
      "loss": 0.0679,
      "step": 1525
    },
    {
      "epoch": 1550.0,
      "grad_norm": 0.800021767616272,
      "learning_rate": 2.5333333333333337e-05,
      "loss": 0.0695,
      "step": 1550
    },
    {
      "epoch": 1575.0,
      "grad_norm": 0.6414686441421509,
      "learning_rate": 2.3944444444444443e-05,
      "loss": 0.069,
      "step": 1575
    },
    {
      "epoch": 1600.0,
      "grad_norm": 0.7216551303863525,
      "learning_rate": 2.255555555555556e-05,
      "loss": 0.0688,
      "step": 1600
    },
    {
      "epoch": 1625.0,
      "grad_norm": 2.296966075897217,
      "learning_rate": 2.116666666666667e-05,
      "loss": 0.0705,
      "step": 1625
    },
    {
      "epoch": 1650.0,
      "grad_norm": 0.7505176067352295,
      "learning_rate": 1.9777777777777778e-05,
      "loss": 0.0788,
      "step": 1650
    },
    {
      "epoch": 1675.0,
      "grad_norm": 0.9566255807876587,
      "learning_rate": 1.838888888888889e-05,
      "loss": 0.0799,
      "step": 1675
    },
    {
      "epoch": 1700.0,
      "grad_norm": 3.685331106185913,
      "learning_rate": 1.7000000000000003e-05,
      "loss": 0.0805,
      "step": 1700
    },
    {
      "epoch": 1725.0,
      "grad_norm": 0.3868086338043213,
      "learning_rate": 1.5611111111111113e-05,
      "loss": 0.075,
      "step": 1725
    },
    {
      "epoch": 1750.0,
      "grad_norm": 0.690929114818573,
      "learning_rate": 1.4222222222222224e-05,
      "loss": 0.0637,
      "step": 1750
    },
    {
      "epoch": 1775.0,
      "grad_norm": 6.572366714477539,
      "learning_rate": 1.2833333333333333e-05,
      "loss": 0.0709,
      "step": 1775
    },
    {
      "epoch": 1800.0,
      "grad_norm": 0.6316695213317871,
      "learning_rate": 1.1444444444444446e-05,
      "loss": 0.0708,
      "step": 1800
    },
    {
      "epoch": 1825.0,
      "grad_norm": 16.782623291015625,
      "learning_rate": 1.0055555555555555e-05,
      "loss": 0.0711,
      "step": 1825
    },
    {
      "epoch": 1850.0,
      "grad_norm": 0.42628633975982666,
      "learning_rate": 8.666666666666668e-06,
      "loss": 0.0713,
      "step": 1850
    },
    {
      "epoch": 1875.0,
      "grad_norm": 0.48583984375,
      "learning_rate": 7.277777777777778e-06,
      "loss": 0.0665,
      "step": 1875
    },
    {
      "epoch": 1900.0,
      "grad_norm": 0.5549265146255493,
      "learning_rate": 5.888888888888889e-06,
      "loss": 0.0658,
      "step": 1900
    },
    {
      "epoch": 1925.0,
      "grad_norm": 2.4642038345336914,
      "learning_rate": 4.5e-06,
      "loss": 0.0687,
      "step": 1925
    },
    {
      "epoch": 1950.0,
      "grad_norm": 0.607486367225647,
      "learning_rate": 3.111111111111111e-06,
      "loss": 0.0733,
      "step": 1950
    },
    {
      "epoch": 1975.0,
      "grad_norm": 0.6398457884788513,
      "learning_rate": 1.7222222222222222e-06,
      "loss": 0.066,
      "step": 1975
    },
    {
      "epoch": 2000.0,
      "grad_norm": 0.43928271532058716,
      "learning_rate": 3.3333333333333335e-07,
      "loss": 0.1062,
      "step": 2000
    },
    {
      "epoch": 2000.0,
      "eval_loss": 0.8336465954780579,
      "eval_runtime": 0.0207,
      "eval_samples_per_second": 48.371,
      "eval_steps_per_second": 48.371,
      "step": 2000
    }
  ],
  "logging_steps": 25,
  "max_steps": 2000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2000,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 48501262656000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}