{
"best_metric": 34.90263315191745,
"best_model_checkpoint": "./whisper-lora-15k-adapters/checkpoint-1000",
"epoch": 1.17096018735363,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02927400468384075,
"grad_norm": 0.49275097250938416,
"learning_rate": 0.0005,
"loss": 0.9988,
"step": 25
},
{
"epoch": 0.0585480093676815,
"grad_norm": 0.5272337794303894,
"learning_rate": 0.001,
"loss": 0.754,
"step": 50
},
{
"epoch": 0.08782201405152225,
"grad_norm": 0.4221147298812866,
"learning_rate": 0.0009925727866904337,
"loss": 0.6045,
"step": 75
},
{
"epoch": 0.117096018735363,
"grad_norm": 0.30049410462379456,
"learning_rate": 0.0009851455733808675,
"loss": 0.6137,
"step": 100
},
{
"epoch": 0.14637002341920374,
"grad_norm": 0.34277957677841187,
"learning_rate": 0.0009777183600713012,
"loss": 0.6348,
"step": 125
},
{
"epoch": 0.1756440281030445,
"grad_norm": 0.8166279196739197,
"learning_rate": 0.000970291146761735,
"loss": 0.6599,
"step": 150
},
{
"epoch": 0.20491803278688525,
"grad_norm": 0.5421469211578369,
"learning_rate": 0.0009628639334521688,
"loss": 0.6297,
"step": 175
},
{
"epoch": 0.234192037470726,
"grad_norm": 0.5719662308692932,
"learning_rate": 0.0009554367201426025,
"loss": 0.6566,
"step": 200
},
{
"epoch": 0.26346604215456676,
"grad_norm": 0.4597041606903076,
"learning_rate": 0.0009480095068330362,
"loss": 0.6108,
"step": 225
},
{
"epoch": 0.2927400468384075,
"grad_norm": 0.303480863571167,
"learning_rate": 0.00094058229352347,
"loss": 0.5868,
"step": 250
},
{
"epoch": 0.32201405152224827,
"grad_norm": 0.35486990213394165,
"learning_rate": 0.0009331550802139037,
"loss": 0.6076,
"step": 275
},
{
"epoch": 0.351288056206089,
"grad_norm": 0.5772029161453247,
"learning_rate": 0.0009257278669043375,
"loss": 0.6243,
"step": 300
},
{
"epoch": 0.3805620608899297,
"grad_norm": 0.32380449771881104,
"learning_rate": 0.0009183006535947712,
"loss": 0.6142,
"step": 325
},
{
"epoch": 0.4098360655737705,
"grad_norm": 0.2743474245071411,
"learning_rate": 0.000910873440285205,
"loss": 0.5561,
"step": 350
},
{
"epoch": 0.43911007025761123,
"grad_norm": 0.4696587026119232,
"learning_rate": 0.0009034462269756387,
"loss": 0.6096,
"step": 375
},
{
"epoch": 0.468384074941452,
"grad_norm": 0.3656092584133148,
"learning_rate": 0.0008960190136660726,
"loss": 0.6609,
"step": 400
},
{
"epoch": 0.49765807962529274,
"grad_norm": 0.704386293888092,
"learning_rate": 0.0008885918003565062,
"loss": 0.565,
"step": 425
},
{
"epoch": 0.5269320843091335,
"grad_norm": 0.6060842871665955,
"learning_rate": 0.0008811645870469401,
"loss": 0.6572,
"step": 450
},
{
"epoch": 0.5562060889929742,
"grad_norm": 0.4069805443286896,
"learning_rate": 0.0008737373737373737,
"loss": 0.557,
"step": 475
},
{
"epoch": 0.585480093676815,
"grad_norm": 0.45368218421936035,
"learning_rate": 0.0008663101604278076,
"loss": 0.5793,
"step": 500
},
{
"epoch": 0.585480093676815,
"eval_loss": 0.546061635017395,
"eval_runtime": 12387.0635,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 37.98093560260484,
"step": 500
},
{
"epoch": 0.6147540983606558,
"grad_norm": 0.5809288620948792,
"learning_rate": 0.0008588829471182412,
"loss": 0.5512,
"step": 525
},
{
"epoch": 0.6440281030444965,
"grad_norm": 0.9479708671569824,
"learning_rate": 0.000851455733808675,
"loss": 0.6098,
"step": 550
},
{
"epoch": 0.6733021077283372,
"grad_norm": 0.38643014430999756,
"learning_rate": 0.0008440285204991087,
"loss": 0.5915,
"step": 575
},
{
"epoch": 0.702576112412178,
"grad_norm": 0.5177704095840454,
"learning_rate": 0.0008366013071895425,
"loss": 0.5909,
"step": 600
},
{
"epoch": 0.7318501170960188,
"grad_norm": 0.39607977867126465,
"learning_rate": 0.0008291740938799762,
"loss": 0.5783,
"step": 625
},
{
"epoch": 0.7611241217798594,
"grad_norm": 0.5243889689445496,
"learning_rate": 0.00082174688057041,
"loss": 0.5573,
"step": 650
},
{
"epoch": 0.7903981264637002,
"grad_norm": 0.38120409846305847,
"learning_rate": 0.0008143196672608437,
"loss": 0.6463,
"step": 675
},
{
"epoch": 0.819672131147541,
"grad_norm": 0.3815406858921051,
"learning_rate": 0.0008068924539512775,
"loss": 0.6244,
"step": 700
},
{
"epoch": 0.8489461358313818,
"grad_norm": 0.49876636266708374,
"learning_rate": 0.0007994652406417113,
"loss": 0.6347,
"step": 725
},
{
"epoch": 0.8782201405152225,
"grad_norm": 0.36918649077415466,
"learning_rate": 0.000792038027332145,
"loss": 0.5391,
"step": 750
},
{
"epoch": 0.9074941451990632,
"grad_norm": 0.4347202479839325,
"learning_rate": 0.0007846108140225788,
"loss": 0.6166,
"step": 775
},
{
"epoch": 0.936768149882904,
"grad_norm": 0.4877653419971466,
"learning_rate": 0.0007771836007130125,
"loss": 0.5318,
"step": 800
},
{
"epoch": 0.9660421545667447,
"grad_norm": 0.40555697679519653,
"learning_rate": 0.0007697563874034463,
"loss": 0.5867,
"step": 825
},
{
"epoch": 0.9953161592505855,
"grad_norm": 0.47605931758880615,
"learning_rate": 0.00076232917409388,
"loss": 0.578,
"step": 850
},
{
"epoch": 1.0245901639344261,
"grad_norm": 0.43946486711502075,
"learning_rate": 0.0007549019607843137,
"loss": 0.4755,
"step": 875
},
{
"epoch": 1.053864168618267,
"grad_norm": 0.3787698745727539,
"learning_rate": 0.0007474747474747475,
"loss": 0.554,
"step": 900
},
{
"epoch": 1.0831381733021077,
"grad_norm": 0.41880446672439575,
"learning_rate": 0.0007400475341651812,
"loss": 0.4911,
"step": 925
},
{
"epoch": 1.1124121779859484,
"grad_norm": 0.4066482484340668,
"learning_rate": 0.000732620320855615,
"loss": 0.5064,
"step": 950
},
{
"epoch": 1.1416861826697893,
"grad_norm": 0.2495754361152649,
"learning_rate": 0.0007251931075460487,
"loss": 0.5102,
"step": 975
},
{
"epoch": 1.17096018735363,
"grad_norm": 0.44539883732795715,
"learning_rate": 0.0007177658942364825,
"loss": 0.4371,
"step": 1000
},
{
"epoch": 1.17096018735363,
"eval_loss": 0.5167025923728943,
"eval_runtime": 12236.3244,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 34.90263315191745,
"step": 1000
}
],
"logging_steps": 25,
"max_steps": 3416,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.654711123968e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}