{
"best_global_step": 1000,
"best_metric": 14.071299035983584,
"best_model_checkpoint": "./SALAMA_C3/checkpoint-1000",
"epoch": 1.675041876046901,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03350083752093802,
"grad_norm": 11.757207870483398,
"learning_rate": 6.333333333333334e-07,
"loss": 0.5112,
"step": 20
},
{
"epoch": 0.06700167504187604,
"grad_norm": 10.197953224182129,
"learning_rate": 1.3e-06,
"loss": 0.5283,
"step": 40
},
{
"epoch": 0.10050251256281408,
"grad_norm": 12.811074256896973,
"learning_rate": 1.9666666666666668e-06,
"loss": 0.474,
"step": 60
},
{
"epoch": 0.13400335008375208,
"grad_norm": 8.559342384338379,
"learning_rate": 2.6e-06,
"loss": 0.424,
"step": 80
},
{
"epoch": 0.16750418760469013,
"grad_norm": 6.9629225730896,
"learning_rate": 3.266666666666667e-06,
"loss": 0.4465,
"step": 100
},
{
"epoch": 0.20100502512562815,
"grad_norm": 7.833058834075928,
"learning_rate": 3.9333333333333335e-06,
"loss": 0.3764,
"step": 120
},
{
"epoch": 0.23450586264656617,
"grad_norm": 6.882424831390381,
"learning_rate": 4.600000000000001e-06,
"loss": 0.3705,
"step": 140
},
{
"epoch": 0.26800670016750416,
"grad_norm": 5.4183244705200195,
"learning_rate": 5.2666666666666665e-06,
"loss": 0.4116,
"step": 160
},
{
"epoch": 0.3015075376884422,
"grad_norm": 6.103787899017334,
"learning_rate": 5.933333333333335e-06,
"loss": 0.3903,
"step": 180
},
{
"epoch": 0.33500837520938026,
"grad_norm": 6.084160804748535,
"learning_rate": 6.600000000000001e-06,
"loss": 0.3794,
"step": 200
},
{
"epoch": 0.3685092127303183,
"grad_norm": 4.531406879425049,
"learning_rate": 7.266666666666668e-06,
"loss": 0.3904,
"step": 220
},
{
"epoch": 0.4020100502512563,
"grad_norm": 6.702854156494141,
"learning_rate": 7.933333333333334e-06,
"loss": 0.3672,
"step": 240
},
{
"epoch": 0.4355108877721943,
"grad_norm": 6.386377811431885,
"learning_rate": 8.6e-06,
"loss": 0.3379,
"step": 260
},
{
"epoch": 0.46901172529313234,
"grad_norm": 5.33281135559082,
"learning_rate": 9.266666666666667e-06,
"loss": 0.4102,
"step": 280
},
{
"epoch": 0.5025125628140703,
"grad_norm": 6.031435012817383,
"learning_rate": 9.933333333333334e-06,
"loss": 0.3815,
"step": 300
},
{
"epoch": 0.5360134003350083,
"grad_norm": 4.8638176918029785,
"learning_rate": 9.968253968253969e-06,
"loss": 0.358,
"step": 320
},
{
"epoch": 0.5695142378559463,
"grad_norm": 5.352113723754883,
"learning_rate": 9.932980599647268e-06,
"loss": 0.3578,
"step": 340
},
{
"epoch": 0.6030150753768844,
"grad_norm": 5.560739040374756,
"learning_rate": 9.897707231040565e-06,
"loss": 0.3795,
"step": 360
},
{
"epoch": 0.6365159128978225,
"grad_norm": 6.186940670013428,
"learning_rate": 9.862433862433864e-06,
"loss": 0.3852,
"step": 380
},
{
"epoch": 0.6700167504187605,
"grad_norm": 5.37507438659668,
"learning_rate": 9.827160493827161e-06,
"loss": 0.3761,
"step": 400
},
{
"epoch": 0.7035175879396985,
"grad_norm": 6.373449802398682,
"learning_rate": 9.79188712522046e-06,
"loss": 0.3674,
"step": 420
},
{
"epoch": 0.7370184254606366,
"grad_norm": 5.702625274658203,
"learning_rate": 9.756613756613757e-06,
"loss": 0.4013,
"step": 440
},
{
"epoch": 0.7705192629815746,
"grad_norm": 5.5675153732299805,
"learning_rate": 9.721340388007056e-06,
"loss": 0.3495,
"step": 460
},
{
"epoch": 0.8040201005025126,
"grad_norm": 6.296374320983887,
"learning_rate": 9.686067019400353e-06,
"loss": 0.3704,
"step": 480
},
{
"epoch": 0.8375209380234506,
"grad_norm": 4.830463886260986,
"learning_rate": 9.650793650793652e-06,
"loss": 0.36,
"step": 500
},
{
"epoch": 0.8375209380234506,
"eval_loss": 0.27764827013015747,
"eval_runtime": 1752.3826,
"eval_samples_per_second": 2.725,
"eval_steps_per_second": 0.341,
"eval_wer": 19.59291781998664,
"step": 500
},
{
"epoch": 0.8710217755443886,
"grad_norm": 4.863058090209961,
"learning_rate": 9.61552028218695e-06,
"loss": 0.3874,
"step": 520
},
{
"epoch": 0.9045226130653267,
"grad_norm": 6.121027946472168,
"learning_rate": 9.580246913580248e-06,
"loss": 0.382,
"step": 540
},
{
"epoch": 0.9380234505862647,
"grad_norm": 4.538082599639893,
"learning_rate": 9.544973544973546e-06,
"loss": 0.3611,
"step": 560
},
{
"epoch": 0.9715242881072027,
"grad_norm": 4.8772382736206055,
"learning_rate": 9.509700176366844e-06,
"loss": 0.3584,
"step": 580
},
{
"epoch": 1.0050251256281406,
"grad_norm": 4.039212703704834,
"learning_rate": 9.474426807760142e-06,
"loss": 0.3701,
"step": 600
},
{
"epoch": 1.0385259631490786,
"grad_norm": 4.787687301635742,
"learning_rate": 9.43915343915344e-06,
"loss": 0.2058,
"step": 620
},
{
"epoch": 1.0720268006700167,
"grad_norm": 4.503021717071533,
"learning_rate": 9.403880070546738e-06,
"loss": 0.2455,
"step": 640
},
{
"epoch": 1.1055276381909547,
"grad_norm": 6.663857936859131,
"learning_rate": 9.368606701940036e-06,
"loss": 0.2179,
"step": 660
},
{
"epoch": 1.1390284757118927,
"grad_norm": 4.486196041107178,
"learning_rate": 9.333333333333334e-06,
"loss": 0.2196,
"step": 680
},
{
"epoch": 1.1725293132328307,
"grad_norm": 4.667060852050781,
"learning_rate": 9.298059964726633e-06,
"loss": 0.2183,
"step": 700
},
{
"epoch": 1.2060301507537687,
"grad_norm": 5.608316898345947,
"learning_rate": 9.26278659611993e-06,
"loss": 0.2161,
"step": 720
},
{
"epoch": 1.2395309882747068,
"grad_norm": 4.2184271812438965,
"learning_rate": 9.227513227513229e-06,
"loss": 0.2382,
"step": 740
},
{
"epoch": 1.2730318257956448,
"grad_norm": 3.871945381164551,
"learning_rate": 9.192239858906526e-06,
"loss": 0.2214,
"step": 760
},
{
"epoch": 1.3065326633165828,
"grad_norm": 3.730222225189209,
"learning_rate": 9.156966490299825e-06,
"loss": 0.2213,
"step": 780
},
{
"epoch": 1.3400335008375208,
"grad_norm": 4.740777015686035,
"learning_rate": 9.121693121693122e-06,
"loss": 0.2503,
"step": 800
},
{
"epoch": 1.3735343383584588,
"grad_norm": 4.123469352722168,
"learning_rate": 9.086419753086421e-06,
"loss": 0.2443,
"step": 820
},
{
"epoch": 1.4070351758793969,
"grad_norm": 3.988917827606201,
"learning_rate": 9.051146384479718e-06,
"loss": 0.241,
"step": 840
},
{
"epoch": 1.4405360134003349,
"grad_norm": 5.4730305671691895,
"learning_rate": 9.015873015873017e-06,
"loss": 0.2163,
"step": 860
},
{
"epoch": 1.474036850921273,
"grad_norm": 5.020652770996094,
"learning_rate": 8.980599647266314e-06,
"loss": 0.2387,
"step": 880
},
{
"epoch": 1.507537688442211,
"grad_norm": 4.231486797332764,
"learning_rate": 8.945326278659613e-06,
"loss": 0.2523,
"step": 900
},
{
"epoch": 1.541038525963149,
"grad_norm": 6.197975158691406,
"learning_rate": 8.910052910052912e-06,
"loss": 0.2255,
"step": 920
},
{
"epoch": 1.574539363484087,
"grad_norm": 5.489210605621338,
"learning_rate": 8.874779541446209e-06,
"loss": 0.2397,
"step": 940
},
{
"epoch": 1.608040201005025,
"grad_norm": 4.32737398147583,
"learning_rate": 8.839506172839508e-06,
"loss": 0.2298,
"step": 960
},
{
"epoch": 1.641541038525963,
"grad_norm": 4.504214763641357,
"learning_rate": 8.804232804232805e-06,
"loss": 0.2301,
"step": 980
},
{
"epoch": 1.675041876046901,
"grad_norm": 3.9694759845733643,
"learning_rate": 8.768959435626104e-06,
"loss": 0.2338,
"step": 1000
},
{
"epoch": 1.675041876046901,
"eval_loss": 0.18838582932949066,
"eval_runtime": 1770.3449,
"eval_samples_per_second": 2.698,
"eval_steps_per_second": 0.337,
"eval_wer": 14.071299035983584,
"step": 1000
}
],
"logging_steps": 20,
"max_steps": 5970,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.23386705403904e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}