fayda_agent/checkpoint-400/trainer_state.json
hamzabouajila's picture
Upload adapter and processor from whisper-tunisian-lora
d7bf90e verified
{
"best_global_step": 200,
"best_metric": 40.680564071122014,
"best_model_checkpoint": "./whisper-tunisian-lora/checkpoint-200",
"epoch": 3.4188034188034186,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.21367521367521367,
"grad_norm": 0.4574618339538574,
"learning_rate": 0.00048,
"loss": 1.0657,
"step": 25
},
{
"epoch": 0.42735042735042733,
"grad_norm": 0.4719775915145874,
"learning_rate": 0.00098,
"loss": 0.8277,
"step": 50
},
{
"epoch": 0.6410256410256411,
"grad_norm": 0.4478321075439453,
"learning_rate": 0.0009466666666666667,
"loss": 0.727,
"step": 75
},
{
"epoch": 0.8547008547008547,
"grad_norm": 0.5259848833084106,
"learning_rate": 0.0008911111111111111,
"loss": 0.6485,
"step": 100
},
{
"epoch": 0.8547008547008547,
"eval_loss": 0.6768606901168823,
"eval_runtime": 590.6537,
"eval_samples_per_second": 0.176,
"eval_steps_per_second": 0.022,
"eval_wer": 46.413243408951566,
"step": 100
},
{
"epoch": 1.0683760683760684,
"grad_norm": 0.6370269656181335,
"learning_rate": 0.0008355555555555556,
"loss": 0.5682,
"step": 125
},
{
"epoch": 1.282051282051282,
"grad_norm": 0.5759462714195251,
"learning_rate": 0.0007800000000000001,
"loss": 0.4578,
"step": 150
},
{
"epoch": 1.4957264957264957,
"grad_norm": 0.47142961621284485,
"learning_rate": 0.0007244444444444444,
"loss": 0.468,
"step": 175
},
{
"epoch": 1.7094017094017095,
"grad_norm": 0.36835190653800964,
"learning_rate": 0.0006688888888888889,
"loss": 0.5021,
"step": 200
},
{
"epoch": 1.7094017094017095,
"eval_loss": 0.6350020170211792,
"eval_runtime": 508.4596,
"eval_samples_per_second": 0.205,
"eval_steps_per_second": 0.026,
"eval_wer": 40.680564071122014,
"step": 200
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.399702250957489,
"learning_rate": 0.0006133333333333334,
"loss": 0.5935,
"step": 225
},
{
"epoch": 2.1367521367521367,
"grad_norm": 0.43302881717681885,
"learning_rate": 0.0005577777777777778,
"loss": 0.3065,
"step": 250
},
{
"epoch": 2.3504273504273505,
"grad_norm": 0.5065337419509888,
"learning_rate": 0.0005022222222222223,
"loss": 0.3235,
"step": 275
},
{
"epoch": 2.564102564102564,
"grad_norm": 0.499609112739563,
"learning_rate": 0.00044666666666666666,
"loss": 0.3312,
"step": 300
},
{
"epoch": 2.564102564102564,
"eval_loss": 0.6243518590927124,
"eval_runtime": 723.1956,
"eval_samples_per_second": 0.144,
"eval_steps_per_second": 0.018,
"eval_wer": 93.93010423053342,
"step": 300
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.4077036380767822,
"learning_rate": 0.0003911111111111111,
"loss": 0.304,
"step": 325
},
{
"epoch": 2.9914529914529915,
"grad_norm": 0.3863629996776581,
"learning_rate": 0.0003355555555555556,
"loss": 0.2962,
"step": 350
},
{
"epoch": 3.2051282051282053,
"grad_norm": 0.43283241987228394,
"learning_rate": 0.00028000000000000003,
"loss": 0.1706,
"step": 375
},
{
"epoch": 3.4188034188034186,
"grad_norm": 0.38285499811172485,
"learning_rate": 0.00022444444444444446,
"loss": 0.1935,
"step": 400
},
{
"epoch": 3.4188034188034186,
"eval_loss": 0.6525424718856812,
"eval_runtime": 660.1882,
"eval_samples_per_second": 0.158,
"eval_steps_per_second": 0.02,
"eval_wer": 55.54874310239117,
"step": 400
}
],
"logging_steps": 25,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.092615143030784e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}