{
"best_metric": 37.68099852505035,
"best_model_checkpoint": "./whisper-lora-10k-adapters/checkpoint-711",
"epoch": 1.3875878220140514,
"eval_steps": 237,
"global_step": 1185,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02927400468384075,
"grad_norm": 0.578233540058136,
"learning_rate": 0.0005,
"loss": 0.99,
"step": 25
},
{
"epoch": 0.0585480093676815,
"grad_norm": 0.3136115074157715,
"learning_rate": 0.001,
"loss": 0.7869,
"step": 50
},
{
"epoch": 0.08782201405152225,
"grad_norm": 0.3556165397167206,
"learning_rate": 0.0009940758293838863,
"loss": 0.6559,
"step": 75
},
{
"epoch": 0.117096018735363,
"grad_norm": 0.43047836422920227,
"learning_rate": 0.0009881516587677726,
"loss": 0.6799,
"step": 100
},
{
"epoch": 0.14637002341920374,
"grad_norm": 0.41161859035491943,
"learning_rate": 0.0009822274881516586,
"loss": 0.6179,
"step": 125
},
{
"epoch": 0.1756440281030445,
"grad_norm": 0.3486640453338623,
"learning_rate": 0.000976303317535545,
"loss": 0.6218,
"step": 150
},
{
"epoch": 0.20491803278688525,
"grad_norm": 0.33961209654808044,
"learning_rate": 0.0009703791469194313,
"loss": 0.5623,
"step": 175
},
{
"epoch": 0.234192037470726,
"grad_norm": 0.4211539328098297,
"learning_rate": 0.0009644549763033176,
"loss": 0.6293,
"step": 200
},
{
"epoch": 0.26346604215456676,
"grad_norm": 0.4401342272758484,
"learning_rate": 0.0009585308056872039,
"loss": 0.65,
"step": 225
},
{
"epoch": 0.2775175644028103,
"eval_loss": 0.6182317733764648,
"eval_runtime": 12305.8898,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 42.11536326582399,
"step": 237
},
{
"epoch": 0.2927400468384075,
"grad_norm": 0.5222854614257812,
"learning_rate": 0.0009526066350710901,
"loss": 0.6494,
"step": 250
},
{
"epoch": 0.32201405152224827,
"grad_norm": 0.5739536285400391,
"learning_rate": 0.0009466824644549763,
"loss": 0.5656,
"step": 275
},
{
"epoch": 0.351288056206089,
"grad_norm": 0.4213266968727112,
"learning_rate": 0.0009407582938388626,
"loss": 0.5781,
"step": 300
},
{
"epoch": 0.3805620608899297,
"grad_norm": 0.5185717344284058,
"learning_rate": 0.0009348341232227489,
"loss": 0.5993,
"step": 325
},
{
"epoch": 0.4098360655737705,
"grad_norm": 0.41156110167503357,
"learning_rate": 0.0009289099526066352,
"loss": 0.5504,
"step": 350
},
{
"epoch": 0.43911007025761123,
"grad_norm": 0.44983068108558655,
"learning_rate": 0.0009229857819905212,
"loss": 0.642,
"step": 375
},
{
"epoch": 0.468384074941452,
"grad_norm": 0.7018289566040039,
"learning_rate": 0.0009170616113744075,
"loss": 0.6313,
"step": 400
},
{
"epoch": 0.49765807962529274,
"grad_norm": 0.41570019721984863,
"learning_rate": 0.0009111374407582938,
"loss": 0.642,
"step": 425
},
{
"epoch": 0.5269320843091335,
"grad_norm": 0.2906375229358673,
"learning_rate": 0.0009052132701421801,
"loss": 0.5501,
"step": 450
},
{
"epoch": 0.5550351288056206,
"eval_loss": 0.5893104076385498,
"eval_runtime": 12217.9829,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 39.71420868158533,
"step": 474
},
{
"epoch": 0.5562060889929742,
"grad_norm": 0.42602404952049255,
"learning_rate": 0.0008992890995260664,
"loss": 0.6419,
"step": 475
},
{
"epoch": 0.585480093676815,
"grad_norm": 0.45508912205696106,
"learning_rate": 0.0008933649289099525,
"loss": 0.5816,
"step": 500
},
{
"epoch": 0.6147540983606558,
"grad_norm": 0.5000929236412048,
"learning_rate": 0.0008874407582938388,
"loss": 0.6941,
"step": 525
},
{
"epoch": 0.6440281030444965,
"grad_norm": 0.4415169656276703,
"learning_rate": 0.0008815165876777251,
"loss": 0.5615,
"step": 550
},
{
"epoch": 0.6733021077283372,
"grad_norm": 0.5120753049850464,
"learning_rate": 0.0008755924170616114,
"loss": 0.559,
"step": 575
},
{
"epoch": 0.702576112412178,
"grad_norm": 0.3653784990310669,
"learning_rate": 0.0008696682464454977,
"loss": 0.5836,
"step": 600
},
{
"epoch": 0.7318501170960188,
"grad_norm": 0.5504665374755859,
"learning_rate": 0.0008637440758293838,
"loss": 0.6163,
"step": 625
},
{
"epoch": 0.7611241217798594,
"grad_norm": 0.49855440855026245,
"learning_rate": 0.0008578199052132701,
"loss": 0.5482,
"step": 650
},
{
"epoch": 0.7903981264637002,
"grad_norm": 0.3784034848213196,
"learning_rate": 0.0008518957345971564,
"loss": 0.5572,
"step": 675
},
{
"epoch": 0.819672131147541,
"grad_norm": 0.5111596584320068,
"learning_rate": 0.0008459715639810427,
"loss": 0.565,
"step": 700
},
{
"epoch": 0.832552693208431,
"eval_loss": 0.5823442339897156,
"eval_runtime": 12225.5714,
"eval_samples_per_second": 0.124,
"eval_steps_per_second": 0.008,
"eval_wer": 37.68099852505035,
"step": 711
},
{
"epoch": 0.8489461358313818,
"grad_norm": 0.5943437218666077,
"learning_rate": 0.000840047393364929,
"loss": 0.5146,
"step": 725
},
{
"epoch": 0.8782201405152225,
"grad_norm": 0.5228826403617859,
"learning_rate": 0.0008341232227488151,
"loss": 0.5338,
"step": 750
},
{
"epoch": 0.9074941451990632,
"grad_norm": 0.44550982117652893,
"learning_rate": 0.0008281990521327014,
"loss": 0.5631,
"step": 775
},
{
"epoch": 0.936768149882904,
"grad_norm": 0.5326892733573914,
"learning_rate": 0.0008222748815165877,
"loss": 0.5489,
"step": 800
},
{
"epoch": 0.9660421545667447,
"grad_norm": 0.5083812475204468,
"learning_rate": 0.000816350710900474,
"loss": 0.5336,
"step": 825
},
{
"epoch": 0.9953161592505855,
"grad_norm": 0.4346718192100525,
"learning_rate": 0.0008104265402843603,
"loss": 0.6155,
"step": 850
},
{
"epoch": 1.0245901639344261,
"grad_norm": 0.4419436454772949,
"learning_rate": 0.0008045023696682464,
"loss": 0.5506,
"step": 875
},
{
"epoch": 1.053864168618267,
"grad_norm": 0.5935924649238586,
"learning_rate": 0.0007985781990521327,
"loss": 0.5407,
"step": 900
},
{
"epoch": 1.0831381733021077,
"grad_norm": 0.4228830635547638,
"learning_rate": 0.000792654028436019,
"loss": 0.5527,
"step": 925
},
{
"epoch": 1.1100702576112411,
"eval_loss": 0.5060898065567017,
"eval_runtime": 12332.7416,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 37.76065419091052,
"step": 948
},
{
"epoch": 1.1124121779859484,
"grad_norm": 0.37129494547843933,
"learning_rate": 0.0007867298578199053,
"loss": 0.5191,
"step": 950
},
{
"epoch": 1.1416861826697893,
"grad_norm": 0.7254778146743774,
"learning_rate": 0.0007808056872037916,
"loss": 0.5537,
"step": 975
},
{
"epoch": 1.17096018735363,
"grad_norm": 0.4878183603286743,
"learning_rate": 0.0007748815165876777,
"loss": 0.5281,
"step": 1000
},
{
"epoch": 1.2002341920374708,
"grad_norm": 0.35084593296051025,
"learning_rate": 0.000768957345971564,
"loss": 0.5166,
"step": 1025
},
{
"epoch": 1.2295081967213115,
"grad_norm": 0.5030648708343506,
"learning_rate": 0.0007630331753554502,
"loss": 0.5284,
"step": 1050
},
{
"epoch": 1.2587822014051522,
"grad_norm": 0.5004339218139648,
"learning_rate": 0.0007571090047393365,
"loss": 0.5695,
"step": 1075
},
{
"epoch": 1.288056206088993,
"grad_norm": 0.5789551734924316,
"learning_rate": 0.0007511848341232228,
"loss": 0.5511,
"step": 1100
},
{
"epoch": 1.3173302107728337,
"grad_norm": 0.389371782541275,
"learning_rate": 0.0007452606635071089,
"loss": 0.5661,
"step": 1125
},
{
"epoch": 1.3466042154566744,
"grad_norm": 0.38161447644233704,
"learning_rate": 0.0007393364928909952,
"loss": 0.5087,
"step": 1150
},
{
"epoch": 1.3758782201405153,
"grad_norm": 0.40263721346855164,
"learning_rate": 0.0007334123222748815,
"loss": 0.5147,
"step": 1175
},
{
"epoch": 1.3875878220140514,
"eval_loss": 0.5079419016838074,
"eval_runtime": 12322.9428,
"eval_samples_per_second": 0.123,
"eval_steps_per_second": 0.008,
"eval_wer": 38.37238559521937,
"step": 1185
}
],
"logging_steps": 25,
"max_steps": 4270,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 237,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.96083268190208e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}