{
"best_global_step": 2000,
"best_metric": 38.607193210094984,
"best_model_checkpoint": "./SALAMA_C7/checkpoint-2000",
"epoch": 0.8148299042574862,
"eval_steps": 2000,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008148299042574863,
"grad_norm": 15.915007591247559,
"learning_rate": 1.9000000000000002e-06,
"loss": 1.0767,
"step": 20
},
{
"epoch": 0.016296598085149726,
"grad_norm": 10.589001655578613,
"learning_rate": 3.900000000000001e-06,
"loss": 0.9639,
"step": 40
},
{
"epoch": 0.024444897127724588,
"grad_norm": 11.770127296447754,
"learning_rate": 5.9e-06,
"loss": 0.9679,
"step": 60
},
{
"epoch": 0.03259319617029945,
"grad_norm": 11.00736141204834,
"learning_rate": 7.9e-06,
"loss": 0.9264,
"step": 80
},
{
"epoch": 0.04074149521287431,
"grad_norm": 8.219990730285645,
"learning_rate": 9.9e-06,
"loss": 0.7982,
"step": 100
},
{
"epoch": 0.048889794255449176,
"grad_norm": 7.805874824523926,
"learning_rate": 9.973847212663455e-06,
"loss": 0.7226,
"step": 120
},
{
"epoch": 0.057038093298024034,
"grad_norm": 6.7659430503845215,
"learning_rate": 9.946317962835514e-06,
"loss": 0.7378,
"step": 140
},
{
"epoch": 0.0651863923405989,
"grad_norm": 6.5482282638549805,
"learning_rate": 9.91878871300757e-06,
"loss": 0.716,
"step": 160
},
{
"epoch": 0.07333469138317376,
"grad_norm": 6.729607105255127,
"learning_rate": 9.891259463179629e-06,
"loss": 0.7048,
"step": 180
},
{
"epoch": 0.08148299042574862,
"grad_norm": 8.517411231994629,
"learning_rate": 9.863730213351686e-06,
"loss": 0.7016,
"step": 200
},
{
"epoch": 0.0896312894683235,
"grad_norm": 6.785459041595459,
"learning_rate": 9.836200963523744e-06,
"loss": 0.7395,
"step": 220
},
{
"epoch": 0.09777958851089835,
"grad_norm": 6.437356472015381,
"learning_rate": 9.808671713695803e-06,
"loss": 0.6579,
"step": 240
},
{
"epoch": 0.10592788755347321,
"grad_norm": 6.3125762939453125,
"learning_rate": 9.781142463867861e-06,
"loss": 0.6969,
"step": 260
},
{
"epoch": 0.11407618659604807,
"grad_norm": 6.493209362030029,
"learning_rate": 9.753613214039918e-06,
"loss": 0.6338,
"step": 280
},
{
"epoch": 0.12222448563862294,
"grad_norm": 8.855860710144043,
"learning_rate": 9.726083964211977e-06,
"loss": 0.6657,
"step": 300
},
{
"epoch": 0.1303727846811978,
"grad_norm": 5.335328578948975,
"learning_rate": 9.698554714384033e-06,
"loss": 0.6906,
"step": 320
},
{
"epoch": 0.13852108372377267,
"grad_norm": 6.506489276885986,
"learning_rate": 9.671025464556092e-06,
"loss": 0.6485,
"step": 340
},
{
"epoch": 0.14666938276634753,
"grad_norm": 5.023345947265625,
"learning_rate": 9.64349621472815e-06,
"loss": 0.6291,
"step": 360
},
{
"epoch": 0.15481768180892239,
"grad_norm": 8.132065773010254,
"learning_rate": 9.615966964900207e-06,
"loss": 0.6553,
"step": 380
},
{
"epoch": 0.16296598085149724,
"grad_norm": 7.615749359130859,
"learning_rate": 9.588437715072266e-06,
"loss": 0.6635,
"step": 400
},
{
"epoch": 0.1711142798940721,
"grad_norm": 6.487279415130615,
"learning_rate": 9.560908465244322e-06,
"loss": 0.7554,
"step": 420
},
{
"epoch": 0.179262578936647,
"grad_norm": 6.409642219543457,
"learning_rate": 9.53337921541638e-06,
"loss": 0.5839,
"step": 440
},
{
"epoch": 0.18741087797922185,
"grad_norm": 5.239986419677734,
"learning_rate": 9.505849965588438e-06,
"loss": 0.7271,
"step": 460
},
{
"epoch": 0.1955591770217967,
"grad_norm": 5.452706336975098,
"learning_rate": 9.478320715760496e-06,
"loss": 0.6758,
"step": 480
},
{
"epoch": 0.20370747606437156,
"grad_norm": 4.827511787414551,
"learning_rate": 9.450791465932555e-06,
"loss": 0.6178,
"step": 500
},
{
"epoch": 0.21185577510694642,
"grad_norm": 6.320280075073242,
"learning_rate": 9.423262216104611e-06,
"loss": 0.6781,
"step": 520
},
{
"epoch": 0.22000407414952128,
"grad_norm": 5.648658275604248,
"learning_rate": 9.39573296627667e-06,
"loss": 0.6448,
"step": 540
},
{
"epoch": 0.22815237319209614,
"grad_norm": 6.806399822235107,
"learning_rate": 9.368203716448727e-06,
"loss": 0.6206,
"step": 560
},
{
"epoch": 0.23630067223467102,
"grad_norm": 6.556358337402344,
"learning_rate": 9.340674466620785e-06,
"loss": 0.6105,
"step": 580
},
{
"epoch": 0.24444897127724588,
"grad_norm": 5.21808385848999,
"learning_rate": 9.313145216792842e-06,
"loss": 0.651,
"step": 600
},
{
"epoch": 0.2525972703198207,
"grad_norm": 6.145657539367676,
"learning_rate": 9.2856159669649e-06,
"loss": 0.6504,
"step": 620
},
{
"epoch": 0.2607455693623956,
"grad_norm": 4.201127529144287,
"learning_rate": 9.258086717136959e-06,
"loss": 0.5885,
"step": 640
},
{
"epoch": 0.2688938684049705,
"grad_norm": 5.38640022277832,
"learning_rate": 9.230557467309017e-06,
"loss": 0.5888,
"step": 660
},
{
"epoch": 0.27704216744754534,
"grad_norm": 5.217910289764404,
"learning_rate": 9.203028217481074e-06,
"loss": 0.5901,
"step": 680
},
{
"epoch": 0.2851904664901202,
"grad_norm": 5.227973461151123,
"learning_rate": 9.175498967653133e-06,
"loss": 0.7178,
"step": 700
},
{
"epoch": 0.29333876553269506,
"grad_norm": 6.235045909881592,
"learning_rate": 9.147969717825191e-06,
"loss": 0.6711,
"step": 720
},
{
"epoch": 0.3014870645752699,
"grad_norm": 5.588578224182129,
"learning_rate": 9.120440467997248e-06,
"loss": 0.522,
"step": 740
},
{
"epoch": 0.30963536361784477,
"grad_norm": 5.49254035949707,
"learning_rate": 9.092911218169306e-06,
"loss": 0.5962,
"step": 760
},
{
"epoch": 0.31778366266041963,
"grad_norm": 5.4190239906311035,
"learning_rate": 9.065381968341363e-06,
"loss": 0.5981,
"step": 780
},
{
"epoch": 0.3259319617029945,
"grad_norm": 6.350463390350342,
"learning_rate": 9.037852718513422e-06,
"loss": 0.6114,
"step": 800
},
{
"epoch": 0.33408026074556935,
"grad_norm": 6.231777191162109,
"learning_rate": 9.010323468685478e-06,
"loss": 0.6729,
"step": 820
},
{
"epoch": 0.3422285597881442,
"grad_norm": 6.590610027313232,
"learning_rate": 8.982794218857537e-06,
"loss": 0.6124,
"step": 840
},
{
"epoch": 0.35037685883071906,
"grad_norm": 5.504190444946289,
"learning_rate": 8.955264969029595e-06,
"loss": 0.6555,
"step": 860
},
{
"epoch": 0.358525157873294,
"grad_norm": 4.821358680725098,
"learning_rate": 8.927735719201652e-06,
"loss": 0.5743,
"step": 880
},
{
"epoch": 0.36667345691586883,
"grad_norm": 6.006564140319824,
"learning_rate": 8.90020646937371e-06,
"loss": 0.6157,
"step": 900
},
{
"epoch": 0.3748217559584437,
"grad_norm": 5.9578471183776855,
"learning_rate": 8.872677219545767e-06,
"loss": 0.6485,
"step": 920
},
{
"epoch": 0.38297005500101855,
"grad_norm": 5.149762153625488,
"learning_rate": 8.845147969717826e-06,
"loss": 0.5951,
"step": 940
},
{
"epoch": 0.3911183540435934,
"grad_norm": 6.143222332000732,
"learning_rate": 8.817618719889883e-06,
"loss": 0.5805,
"step": 960
},
{
"epoch": 0.39926665308616827,
"grad_norm": 5.496342658996582,
"learning_rate": 8.790089470061941e-06,
"loss": 0.6158,
"step": 980
},
{
"epoch": 0.4074149521287431,
"grad_norm": 5.14535665512085,
"learning_rate": 8.762560220234e-06,
"loss": 0.6036,
"step": 1000
},
{
"epoch": 0.415563251171318,
"grad_norm": 5.73581600189209,
"learning_rate": 8.735030970406058e-06,
"loss": 0.5925,
"step": 1020
},
{
"epoch": 0.42371155021389284,
"grad_norm": 4.455753326416016,
"learning_rate": 8.707501720578115e-06,
"loss": 0.5894,
"step": 1040
},
{
"epoch": 0.4318598492564677,
"grad_norm": 6.366560935974121,
"learning_rate": 8.679972470750173e-06,
"loss": 0.6424,
"step": 1060
},
{
"epoch": 0.44000814829904256,
"grad_norm": 5.11430025100708,
"learning_rate": 8.652443220922232e-06,
"loss": 0.5731,
"step": 1080
},
{
"epoch": 0.4481564473416174,
"grad_norm": 6.0588812828063965,
"learning_rate": 8.624913971094288e-06,
"loss": 0.6257,
"step": 1100
},
{
"epoch": 0.4563047463841923,
"grad_norm": 6.879603385925293,
"learning_rate": 8.597384721266347e-06,
"loss": 0.6427,
"step": 1120
},
{
"epoch": 0.4644530454267672,
"grad_norm": 4.6673502922058105,
"learning_rate": 8.569855471438404e-06,
"loss": 0.5763,
"step": 1140
},
{
"epoch": 0.47260134446934204,
"grad_norm": 5.316527843475342,
"learning_rate": 8.542326221610462e-06,
"loss": 0.6207,
"step": 1160
},
{
"epoch": 0.4807496435119169,
"grad_norm": 5.4205780029296875,
"learning_rate": 8.514796971782519e-06,
"loss": 0.5435,
"step": 1180
},
{
"epoch": 0.48889794255449176,
"grad_norm": 4.579510688781738,
"learning_rate": 8.487267721954577e-06,
"loss": 0.5937,
"step": 1200
},
{
"epoch": 0.4970462415970666,
"grad_norm": 6.327101707458496,
"learning_rate": 8.459738472126636e-06,
"loss": 0.6186,
"step": 1220
},
{
"epoch": 0.5051945406396414,
"grad_norm": 5.347973346710205,
"learning_rate": 8.432209222298693e-06,
"loss": 0.564,
"step": 1240
},
{
"epoch": 0.5133428396822164,
"grad_norm": 5.44881010055542,
"learning_rate": 8.404679972470751e-06,
"loss": 0.5221,
"step": 1260
},
{
"epoch": 0.5214911387247912,
"grad_norm": 5.802955150604248,
"learning_rate": 8.377150722642808e-06,
"loss": 0.5893,
"step": 1280
},
{
"epoch": 0.5296394377673661,
"grad_norm": 5.931309223175049,
"learning_rate": 8.349621472814866e-06,
"loss": 0.6306,
"step": 1300
},
{
"epoch": 0.537787736809941,
"grad_norm": 5.793541431427002,
"learning_rate": 8.322092222986923e-06,
"loss": 0.6199,
"step": 1320
},
{
"epoch": 0.5459360358525158,
"grad_norm": 4.972778797149658,
"learning_rate": 8.294562973158982e-06,
"loss": 0.4771,
"step": 1340
},
{
"epoch": 0.5540843348950907,
"grad_norm": 5.573855876922607,
"learning_rate": 8.26703372333104e-06,
"loss": 0.5054,
"step": 1360
},
{
"epoch": 0.5622326339376655,
"grad_norm": 4.7720947265625,
"learning_rate": 8.239504473503099e-06,
"loss": 0.5599,
"step": 1380
},
{
"epoch": 0.5703809329802404,
"grad_norm": 6.338388442993164,
"learning_rate": 8.211975223675155e-06,
"loss": 0.5612,
"step": 1400
},
{
"epoch": 0.5785292320228153,
"grad_norm": 6.1034464836120605,
"learning_rate": 8.184445973847214e-06,
"loss": 0.5662,
"step": 1420
},
{
"epoch": 0.5866775310653901,
"grad_norm": 5.423385143280029,
"learning_rate": 8.15691672401927e-06,
"loss": 0.6246,
"step": 1440
},
{
"epoch": 0.594825830107965,
"grad_norm": 6.507946014404297,
"learning_rate": 8.12938747419133e-06,
"loss": 0.5879,
"step": 1460
},
{
"epoch": 0.6029741291505398,
"grad_norm": 5.927179336547852,
"learning_rate": 8.101858224363388e-06,
"loss": 0.5479,
"step": 1480
},
{
"epoch": 0.6111224281931147,
"grad_norm": 6.775645732879639,
"learning_rate": 8.074328974535444e-06,
"loss": 0.5809,
"step": 1500
},
{
"epoch": 0.6192707272356895,
"grad_norm": 4.263314247131348,
"learning_rate": 8.046799724707503e-06,
"loss": 0.55,
"step": 1520
},
{
"epoch": 0.6274190262782644,
"grad_norm": 4.671892166137695,
"learning_rate": 8.01927047487956e-06,
"loss": 0.529,
"step": 1540
},
{
"epoch": 0.6355673253208393,
"grad_norm": 5.7395501136779785,
"learning_rate": 7.991741225051618e-06,
"loss": 0.5349,
"step": 1560
},
{
"epoch": 0.6437156243634141,
"grad_norm": 5.88397216796875,
"learning_rate": 7.964211975223675e-06,
"loss": 0.5675,
"step": 1580
},
{
"epoch": 0.651863923405989,
"grad_norm": 5.037132263183594,
"learning_rate": 7.936682725395733e-06,
"loss": 0.5838,
"step": 1600
},
{
"epoch": 0.6600122224485638,
"grad_norm": 6.442205429077148,
"learning_rate": 7.909153475567792e-06,
"loss": 0.5275,
"step": 1620
},
{
"epoch": 0.6681605214911387,
"grad_norm": 4.626034259796143,
"learning_rate": 7.881624225739849e-06,
"loss": 0.5251,
"step": 1640
},
{
"epoch": 0.6763088205337136,
"grad_norm": 6.96089506149292,
"learning_rate": 7.854094975911907e-06,
"loss": 0.589,
"step": 1660
},
{
"epoch": 0.6844571195762884,
"grad_norm": 5.303503036499023,
"learning_rate": 7.826565726083964e-06,
"loss": 0.6233,
"step": 1680
},
{
"epoch": 0.6926054186188633,
"grad_norm": 6.015293121337891,
"learning_rate": 7.799036476256022e-06,
"loss": 0.5517,
"step": 1700
},
{
"epoch": 0.7007537176614381,
"grad_norm": 5.348663806915283,
"learning_rate": 7.77150722642808e-06,
"loss": 0.5403,
"step": 1720
},
{
"epoch": 0.708902016704013,
"grad_norm": 4.189513206481934,
"learning_rate": 7.743977976600138e-06,
"loss": 0.5817,
"step": 1740
},
{
"epoch": 0.717050315746588,
"grad_norm": 7.446457386016846,
"learning_rate": 7.716448726772196e-06,
"loss": 0.5437,
"step": 1760
},
{
"epoch": 0.7251986147891628,
"grad_norm": 4.759880542755127,
"learning_rate": 7.688919476944255e-06,
"loss": 0.5648,
"step": 1780
},
{
"epoch": 0.7333469138317377,
"grad_norm": 5.6719584465026855,
"learning_rate": 7.661390227116311e-06,
"loss": 0.6118,
"step": 1800
},
{
"epoch": 0.7414952128743125,
"grad_norm": 4.943815231323242,
"learning_rate": 7.63386097728837e-06,
"loss": 0.5326,
"step": 1820
},
{
"epoch": 0.7496435119168874,
"grad_norm": 3.6254968643188477,
"learning_rate": 7.6063317274604275e-06,
"loss": 0.5087,
"step": 1840
},
{
"epoch": 0.7577918109594622,
"grad_norm": 5.9594526290893555,
"learning_rate": 7.578802477632485e-06,
"loss": 0.532,
"step": 1860
},
{
"epoch": 0.7659401100020371,
"grad_norm": 5.103460311889648,
"learning_rate": 7.551273227804543e-06,
"loss": 0.553,
"step": 1880
},
{
"epoch": 0.774088409044612,
"grad_norm": 6.327749729156494,
"learning_rate": 7.5237439779766004e-06,
"loss": 0.5767,
"step": 1900
},
{
"epoch": 0.7822367080871868,
"grad_norm": 5.8910088539123535,
"learning_rate": 7.496214728148659e-06,
"loss": 0.5948,
"step": 1920
},
{
"epoch": 0.7903850071297617,
"grad_norm": 5.571481227874756,
"learning_rate": 7.4686854783207165e-06,
"loss": 0.5199,
"step": 1940
},
{
"epoch": 0.7985333061723365,
"grad_norm": 4.894184589385986,
"learning_rate": 7.441156228492774e-06,
"loss": 0.5577,
"step": 1960
},
{
"epoch": 0.8066816052149114,
"grad_norm": 5.402436256408691,
"learning_rate": 7.413626978664832e-06,
"loss": 0.5395,
"step": 1980
},
{
"epoch": 0.8148299042574862,
"grad_norm": 5.212170600891113,
"learning_rate": 7.3860977288368894e-06,
"loss": 0.5319,
"step": 2000
},
{
"epoch": 0.8148299042574862,
"eval_loss": 0.4680393934249878,
"eval_runtime": 10851.5751,
"eval_samples_per_second": 1.81,
"eval_steps_per_second": 0.226,
"eval_wer": 38.607193210094984,
"step": 2000
}
],
"logging_steps": 20,
"max_steps": 7365,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.846946562048e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}