{
"best_global_step": 16280,
"best_metric": 0.05104936846864332,
"best_model_checkpoint": "checkpoints/checkpoint-16280",
"epoch": 1.9992631708215645,
"eval_steps": 1628,
"global_step": 16280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06140243153628884,
"grad_norm": 0.31788545846939087,
"learning_rate": 4.99487482593161e-05,
"loss": 0.639,
"step": 500
},
{
"epoch": 0.12280486307257768,
"grad_norm": 0.21716584265232086,
"learning_rate": 4.9794381003990576e-05,
"loss": 0.3961,
"step": 1000
},
{
"epoch": 0.1842072946088665,
"grad_norm": 0.32310155034065247,
"learning_rate": 4.9537535426562465e-05,
"loss": 0.3303,
"step": 1500
},
{
"epoch": 0.19992631708215644,
"eval_avg": 0.07433640047160056,
"eval_cer": 0.048444434926464684,
"eval_der": 0.054747223027151605,
"eval_loss": 0.2203781008720398,
"eval_runtime": 1149.449,
"eval_samples_per_second": 45.035,
"eval_steps_per_second": 0.352,
"eval_wer": 0.1198175434611854,
"step": 1628
},
{
"epoch": 0.24560972614515536,
"grad_norm": 0.1829567700624466,
"learning_rate": 4.997155120906588e-05,
"loss": 0.287,
"step": 2000
},
{
"epoch": 0.3070121576814442,
"grad_norm": 0.1720687448978424,
"learning_rate": 4.984333190353011e-05,
"loss": 0.2682,
"step": 2500
},
{
"epoch": 0.368414589217733,
"grad_norm": 0.19322410225868225,
"learning_rate": 4.961243195561407e-05,
"loss": 0.2506,
"step": 3000
},
{
"epoch": 0.3998526341643129,
"eval_avg": 0.061321180853411116,
"eval_cer": 0.04388846969480041,
"eval_der": 0.03895160822949064,
"eval_loss": 0.18064290285110474,
"eval_runtime": 1155.5329,
"eval_samples_per_second": 44.798,
"eval_steps_per_second": 0.35,
"eval_wer": 0.10112346463594228,
"step": 3256
},
{
"epoch": 0.42981702075402184,
"grad_norm": 0.16168585419654846,
"learning_rate": 4.998779393767303e-05,
"loss": 0.2339,
"step": 3500
},
{
"epoch": 0.4912194522903107,
"grad_norm": 0.12988826632499695,
"learning_rate": 4.988596305882391e-05,
"loss": 0.2271,
"step": 4000
},
{
"epoch": 0.5526218838265995,
"grad_norm": 0.11692152917385101,
"learning_rate": 4.9681275337621505e-05,
"loss": 0.2181,
"step": 4500
},
{
"epoch": 0.5997789512464694,
"eval_avg": 0.06223662921729023,
"eval_cer": 0.03700322837976026,
"eval_der": 0.055956727518601486,
"eval_loss": 0.19135934114456177,
"eval_runtime": 1153.4306,
"eval_samples_per_second": 44.879,
"eval_steps_per_second": 0.351,
"eval_wer": 0.09374993175350896,
"step": 4884
},
{
"epoch": 0.6140243153628884,
"grad_norm": 0.12457627058029175,
"learning_rate": 4.9374576774384186e-05,
"loss": 0.2112,
"step": 5000
},
{
"epoch": 0.6754267468991773,
"grad_norm": 0.13500584661960602,
"learning_rate": 4.8967134993181585e-05,
"loss": 0.2042,
"step": 5500
},
{
"epoch": 0.736829178435466,
"grad_norm": 0.14300259947776794,
"learning_rate": 4.846063400258344e-05,
"loss": 0.1994,
"step": 6000
},
{
"epoch": 0.7982316099717549,
"grad_norm": 0.15229490399360657,
"learning_rate": 4.785716723543856e-05,
"loss": 0.1941,
"step": 6500
},
{
"epoch": 0.7997052683286258,
"eval_avg": 0.05848093903880644,
"eval_cer": 0.034985685323039174,
"eval_der": 0.05257239447503411,
"eval_loss": 0.17743617296218872,
"eval_runtime": 1154.8064,
"eval_samples_per_second": 44.826,
"eval_steps_per_second": 0.351,
"eval_wer": 0.08788473731834603,
"step": 6512
},
{
"epoch": 0.8596340415080437,
"grad_norm": 0.10620284080505371,
"learning_rate": 4.9950986655011213e-05,
"loss": 0.1892,
"step": 7000
},
{
"epoch": 0.9210364730443326,
"grad_norm": 0.10538509488105774,
"learning_rate": 4.9798882653846754e-05,
"loss": 0.1843,
"step": 7500
},
{
"epoch": 0.9824389045806214,
"grad_norm": 0.11998545378446579,
"learning_rate": 4.954428172468978e-05,
"loss": 0.1814,
"step": 8000
},
{
"epoch": 0.9996315854107822,
"eval_avg": 0.05574338139826937,
"eval_cer": 0.0337392713322537,
"eval_der": 0.05011513570945588,
"eval_loss": 0.16507230699062347,
"eval_runtime": 1158.6086,
"eval_samples_per_second": 44.679,
"eval_steps_per_second": 0.35,
"eval_wer": 0.08337573715309854,
"step": 8140
},
{
"epoch": 1.0438413361169103,
"grad_norm": 0.12560783326625824,
"learning_rate": 4.9189048589485774e-05,
"loss": 0.1764,
"step": 8500
},
{
"epoch": 1.105243767653199,
"grad_norm": 0.10523095726966858,
"learning_rate": 4.8733228183318665e-05,
"loss": 0.1735,
"step": 9000
},
{
"epoch": 1.1666461991894879,
"grad_norm": 0.11041487008333206,
"learning_rate": 4.817931533427457e-05,
"loss": 0.1715,
"step": 9500
},
{
"epoch": 1.1995579024929388,
"eval_avg": 0.0544335846636687,
"eval_cer": 0.03342891994782228,
"eval_der": 0.048513281174538105,
"eval_loss": 0.1576041877269745,
"eval_runtime": 1159.9625,
"eval_samples_per_second": 44.626,
"eval_steps_per_second": 0.349,
"eval_wer": 0.08135855286864571,
"step": 9768
},
{
"epoch": 1.2280486307257767,
"grad_norm": 0.1073341891169548,
"learning_rate": 4.998896962187747e-05,
"loss": 0.1674,
"step": 10000
},
{
"epoch": 1.2894510622620656,
"grad_norm": 0.11093363165855408,
"learning_rate": 4.988961418344719e-05,
"loss": 0.167,
"step": 10500
},
{
"epoch": 1.3508534937983545,
"grad_norm": 0.09265608340501785,
"learning_rate": 4.968738681210237e-05,
"loss": 0.165,
"step": 11000
},
{
"epoch": 1.3994842195750952,
"eval_avg": 0.05450751736133735,
"eval_cer": 0.03384634860456726,
"eval_der": 0.04814585144402502,
"eval_loss": 0.15283821523189545,
"eval_runtime": 1150.9092,
"eval_samples_per_second": 44.977,
"eval_steps_per_second": 0.352,
"eval_wer": 0.08153035203541975,
"step": 11396
},
{
"epoch": 1.4122559253346432,
"grad_norm": 0.10535308718681335,
"learning_rate": 4.999780685937908e-05,
"loss": 0.1617,
"step": 11500
},
{
"epoch": 1.473658356870932,
"grad_norm": 0.09392343461513519,
"learning_rate": 4.992486965915939e-05,
"loss": 0.161,
"step": 12000
},
{
"epoch": 1.535060788407221,
"grad_norm": 0.0898473933339119,
"learning_rate": 4.974891481067152e-05,
"loss": 0.1593,
"step": 12500
},
{
"epoch": 1.5964632199435098,
"grad_norm": 0.10352090746164322,
"learning_rate": 4.94706695576259e-05,
"loss": 0.1581,
"step": 13000
},
{
"epoch": 1.5994105366572517,
"eval_avg": 0.052648695144827624,
"eval_cer": 0.03269155476372104,
"eval_der": 0.04647078141601352,
"eval_loss": 0.1520785242319107,
"eval_runtime": 1154.3982,
"eval_samples_per_second": 44.842,
"eval_steps_per_second": 0.351,
"eval_wer": 0.07878374925474832,
"step": 13024
},
{
"epoch": 1.6578656514797987,
"grad_norm": 0.08162818104028702,
"learning_rate": 4.995337159613658e-05,
"loss": 0.1563,
"step": 13500
},
{
"epoch": 1.7192680830160874,
"grad_norm": 0.10093415528535843,
"learning_rate": 4.9803736946497604e-05,
"loss": 0.1549,
"step": 14000
},
{
"epoch": 1.7806705145523762,
"grad_norm": 0.0779104083776474,
"learning_rate": 4.9551585305458784e-05,
"loss": 0.1531,
"step": 14500
},
{
"epoch": 1.799336853739408,
"eval_avg": 0.05215597602151043,
"eval_cer": 0.032804504015484064,
"eval_der": 0.046157249106538095,
"eval_loss": 0.1437014639377594,
"eval_runtime": 1158.5459,
"eval_samples_per_second": 44.681,
"eval_steps_per_second": 0.35,
"eval_wer": 0.07750617494250915,
"step": 14652
},
{
"epoch": 1.8420729460886651,
"grad_norm": 0.09926008433103561,
"learning_rate": 4.919795884774044e-05,
"loss": 0.1519,
"step": 15000
},
{
"epoch": 1.903475377624954,
"grad_norm": 0.0932171419262886,
"learning_rate": 4.874431915635338e-05,
"loss": 0.1508,
"step": 15500
},
{
"epoch": 1.9648778091612429,
"grad_norm": 0.08440640568733215,
"learning_rate": 4.819254118169078e-05,
"loss": 0.1493,
"step": 16000
},
{
"epoch": 1.9992631708215645,
"eval_avg": 0.05104936846864332,
"eval_cer": 0.03196083873281993,
"eval_der": 0.04519598184101095,
"eval_loss": 0.1432473361492157,
"eval_runtime": 1163.7456,
"eval_samples_per_second": 44.481,
"eval_steps_per_second": 0.348,
"eval_wer": 0.07599128483209908,
"step": 16280
}
],
"logging_steps": 500,
"max_steps": 24429,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1628,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.8107754052964844e+18,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}