{
"best_metric": 37.98093560260484,
"best_model_checkpoint": "./whisper-lora-15k-adapters/checkpoint-500",
"epoch": 0.585480093676815,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02927400468384075,
"grad_norm": 0.49275097250938416,
"learning_rate": 0.0005,
"loss": 0.9988,
"step": 25
},
{
"epoch": 0.0585480093676815,
"grad_norm": 0.5272337794303894,
"learning_rate": 0.001,
"loss": 0.754,
"step": 50
},
{
"epoch": 0.08782201405152225,
"grad_norm": 0.4221147298812866,
"learning_rate": 0.0009925727866904337,
"loss": 0.6045,
"step": 75
},
{
"epoch": 0.117096018735363,
"grad_norm": 0.30049410462379456,
"learning_rate": 0.0009851455733808675,
"loss": 0.6137,
"step": 100
},
{
"epoch": 0.14637002341920374,
"grad_norm": 0.34277957677841187,
"learning_rate": 0.0009777183600713012,
"loss": 0.6348,
"step": 125
},
{
"epoch": 0.1756440281030445,
"grad_norm": 0.8166279196739197,
"learning_rate": 0.000970291146761735,
"loss": 0.6599,
"step": 150
},
{
"epoch": 0.20491803278688525,
"grad_norm": 0.5421469211578369,
"learning_rate": 0.0009628639334521688,
"loss": 0.6297,
"step": 175
},
{
"epoch": 0.234192037470726,
"grad_norm": 0.5719662308692932,
"learning_rate": 0.0009554367201426025,
"loss": 0.6566,
"step": 200
},
{
"epoch": 0.26346604215456676,
"grad_norm": 0.4597041606903076,
"learning_rate": 0.0009480095068330362,
"loss": 0.6108,
"step": 225
},
{
"epoch": 0.2927400468384075,
"grad_norm": 0.303480863571167,
"learning_rate": 0.00094058229352347,
"loss": 0.5868,
"step": 250
},
{
"epoch": 0.32201405152224827,
"grad_norm": 0.35486990213394165,
"learning_rate": 0.0009331550802139037,
"loss": 0.6076,
"step": 275
},
{
"epoch": 0.351288056206089,
"grad_norm": 0.5772029161453247,
"learning_rate": 0.0009257278669043375,
"loss": 0.6243,
"step": 300
},
{
"epoch": 0.3805620608899297,
"grad_norm": 0.32380449771881104,
"learning_rate": 0.0009183006535947712,
"loss": 0.6142,
"step": 325
},
{
"epoch": 0.4098360655737705,
"grad_norm": 0.2743474245071411,
"learning_rate": 0.000910873440285205,
"loss": 0.5561,
"step": 350
},
{
"epoch": 0.43911007025761123,
"grad_norm": 0.4696587026119232,
"learning_rate": 0.0009034462269756387,
"loss": 0.6096,
"step": 375
},
{
"epoch": 0.468384074941452,
"grad_norm": 0.3656092584133148,
"learning_rate": 0.0008960190136660726,
"loss": 0.6609,
"step": 400
},
{
"epoch": 0.49765807962529274,
"grad_norm": 0.704386293888092,
"learning_rate": 0.0008885918003565062,
"loss": 0.565,
"step": 425
},
{
"epoch": 0.5269320843091335,
"grad_norm": 0.6060842871665955,
"learning_rate": 0.0008811645870469401,
"loss": 0.6572,
"step": 450
},
{
"epoch": 0.5562060889929742,
"grad_norm": 0.4069805443286896,
"learning_rate": 0.0008737373737373737,
"loss": 0.557,
"step": 475
},
{
"epoch": 0.585480093676815,
"grad_norm": 0.45368218421936035,
"learning_rate": 0.0008663101604278076,
"loss": 0.5793,
"step": 500
},
{
"epoch": 0.585480093676815,
"eval_loss": 0.546061635017395,
"eval_runtime": 12387.0635,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 37.98093560260484,
"step": 500
}
],
"logging_steps": 25,
"max_steps": 3416,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.27355561984e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}