{
"best_metric": 0.15773464658169178,
"best_model_checkpoint": "audio/train/checkpoint/hubert-base/superb_si_42/checkpoint-43240",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 43240,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.23126734505087881,
"grad_norm": 1.697458267211914,
"learning_rate": 1.1563367252543942e-05,
"loss": 7.1124,
"step": 1000
},
{
"epoch": 0.46253469010175763,
"grad_norm": 2.86542010307312,
"learning_rate": 2.3126734505087884e-05,
"loss": 6.9441,
"step": 2000
},
{
"epoch": 0.6938020351526365,
"grad_norm": 4.842338562011719,
"learning_rate": 3.469010175763183e-05,
"loss": 6.5221,
"step": 3000
},
{
"epoch": 0.9250693802035153,
"grad_norm": 5.53955602645874,
"learning_rate": 4.625346901017577e-05,
"loss": 6.2927,
"step": 4000
},
{
"epoch": 1.0,
"eval_accuracy": 0.004924681344148319,
"eval_loss": 6.532104969024658,
"eval_runtime": 123.1249,
"eval_samples_per_second": 56.073,
"eval_steps_per_second": 3.509,
"step": 4324
},
{
"epoch": 1.1563367252543941,
"grad_norm": 4.515321731567383,
"learning_rate": 4.913146263747559e-05,
"loss": 6.1013,
"step": 5000
},
{
"epoch": 1.3876040703052728,
"grad_norm": 6.986465930938721,
"learning_rate": 4.78466440538596e-05,
"loss": 5.9426,
"step": 6000
},
{
"epoch": 1.6188714153561516,
"grad_norm": 7.874293804168701,
"learning_rate": 4.65618254702436e-05,
"loss": 5.809,
"step": 7000
},
{
"epoch": 1.8501387604070305,
"grad_norm": 8.206330299377441,
"learning_rate": 4.527700688662761e-05,
"loss": 5.6909,
"step": 8000
},
{
"epoch": 2.0,
"eval_accuracy": 0.008835457705677868,
"eval_loss": 6.0103302001953125,
"eval_runtime": 120.1466,
"eval_samples_per_second": 57.463,
"eval_steps_per_second": 3.596,
"step": 8648
},
{
"epoch": 2.0814061054579094,
"grad_norm": 9.057954788208008,
"learning_rate": 4.399218830301162e-05,
"loss": 5.5852,
"step": 9000
},
{
"epoch": 2.3126734505087883,
"grad_norm": 10.034080505371094,
"learning_rate": 4.2707369719395625e-05,
"loss": 5.5059,
"step": 10000
},
{
"epoch": 2.543940795559667,
"grad_norm": 9.717106819152832,
"learning_rate": 4.142255113577963e-05,
"loss": 5.4348,
"step": 11000
},
{
"epoch": 2.7752081406105455,
"grad_norm": 9.622520446777344,
"learning_rate": 4.0137732552163636e-05,
"loss": 5.3829,
"step": 12000
},
{
"epoch": 3.0,
"eval_accuracy": 0.02346465816917729,
"eval_loss": 5.530889511108398,
"eval_runtime": 120.1431,
"eval_samples_per_second": 57.465,
"eval_steps_per_second": 3.596,
"step": 12972
},
{
"epoch": 3.0064754856614244,
"grad_norm": 10.915121078491211,
"learning_rate": 3.885291396854764e-05,
"loss": 5.2762,
"step": 13000
},
{
"epoch": 3.2377428307123033,
"grad_norm": 10.905006408691406,
"learning_rate": 3.756809538493165e-05,
"loss": 5.196,
"step": 14000
},
{
"epoch": 3.469010175763182,
"grad_norm": 10.589766502380371,
"learning_rate": 3.628327680131566e-05,
"loss": 5.1321,
"step": 15000
},
{
"epoch": 3.700277520814061,
"grad_norm": 11.086525917053223,
"learning_rate": 3.4998458217699664e-05,
"loss": 5.0639,
"step": 16000
},
{
"epoch": 3.93154486586494,
"grad_norm": 12.314041137695312,
"learning_rate": 3.371363963408367e-05,
"loss": 4.9995,
"step": 17000
},
{
"epoch": 4.0,
"eval_accuracy": 0.03794901506373117,
"eval_loss": 5.1893744468688965,
"eval_runtime": 119.905,
"eval_samples_per_second": 57.579,
"eval_steps_per_second": 3.603,
"step": 17296
},
{
"epoch": 4.162812210915819,
"grad_norm": 10.940951347351074,
"learning_rate": 3.242882105046768e-05,
"loss": 4.9387,
"step": 18000
},
{
"epoch": 4.394079555966697,
"grad_norm": 12.118027687072754,
"learning_rate": 3.114400246685168e-05,
"loss": 4.8872,
"step": 19000
},
{
"epoch": 4.6253469010175765,
"grad_norm": 13.738452911376953,
"learning_rate": 2.9859183883235685e-05,
"loss": 4.8266,
"step": 20000
},
{
"epoch": 4.856614246068455,
"grad_norm": 12.399888038635254,
"learning_rate": 2.8574365299619694e-05,
"loss": 4.7591,
"step": 21000
},
{
"epoch": 5.0,
"eval_accuracy": 0.06199304750869061,
"eval_loss": 4.864396572113037,
"eval_runtime": 120.1129,
"eval_samples_per_second": 57.479,
"eval_steps_per_second": 3.597,
"step": 21620
},
{
"epoch": 5.087881591119334,
"grad_norm": 12.469327926635742,
"learning_rate": 2.72895467160037e-05,
"loss": 4.7002,
"step": 22000
},
{
"epoch": 5.319148936170213,
"grad_norm": 14.091848373413086,
"learning_rate": 2.6004728132387708e-05,
"loss": 4.641,
"step": 23000
},
{
"epoch": 5.550416281221091,
"grad_norm": 12.042238235473633,
"learning_rate": 2.4719909548771713e-05,
"loss": 4.5869,
"step": 24000
},
{
"epoch": 5.78168362627197,
"grad_norm": 13.945825576782227,
"learning_rate": 2.343509096515572e-05,
"loss": 4.5243,
"step": 25000
},
{
"epoch": 6.0,
"eval_accuracy": 0.08589223638470452,
"eval_loss": 4.604219436645508,
"eval_runtime": 119.7064,
"eval_samples_per_second": 57.674,
"eval_steps_per_second": 3.609,
"step": 25944
},
{
"epoch": 6.012950971322849,
"grad_norm": 14.026973724365234,
"learning_rate": 2.2150272381539727e-05,
"loss": 4.4896,
"step": 26000
},
{
"epoch": 6.244218316373728,
"grad_norm": 15.273303985595703,
"learning_rate": 2.0865453797923736e-05,
"loss": 4.4312,
"step": 27000
},
{
"epoch": 6.475485661424607,
"grad_norm": 13.617254257202148,
"learning_rate": 1.958063521430774e-05,
"loss": 4.3541,
"step": 28000
},
{
"epoch": 6.706753006475486,
"grad_norm": 14.490744590759277,
"learning_rate": 1.8295816630691746e-05,
"loss": 4.3084,
"step": 29000
},
{
"epoch": 6.938020351526364,
"grad_norm": 16.995948791503906,
"learning_rate": 1.7010998047075755e-05,
"loss": 4.2486,
"step": 30000
},
{
"epoch": 7.0,
"eval_accuracy": 0.11732329084588644,
"eval_loss": 4.349672317504883,
"eval_runtime": 119.7126,
"eval_samples_per_second": 57.671,
"eval_steps_per_second": 3.609,
"step": 30268
},
{
"epoch": 7.169287696577244,
"grad_norm": 14.805612564086914,
"learning_rate": 1.572617946345976e-05,
"loss": 4.1788,
"step": 31000
},
{
"epoch": 7.400555041628122,
"grad_norm": 15.498200416564941,
"learning_rate": 1.4441360879843766e-05,
"loss": 4.1536,
"step": 32000
},
{
"epoch": 7.631822386679001,
"grad_norm": 14.68932819366455,
"learning_rate": 1.3156542296227773e-05,
"loss": 4.1161,
"step": 33000
},
{
"epoch": 7.86308973172988,
"grad_norm": 14.384370803833008,
"learning_rate": 1.187172371261178e-05,
"loss": 4.0813,
"step": 34000
},
{
"epoch": 8.0,
"eval_accuracy": 0.14049826187717265,
"eval_loss": 4.121533393859863,
"eval_runtime": 121.082,
"eval_samples_per_second": 57.019,
"eval_steps_per_second": 3.568,
"step": 34592
},
{
"epoch": 8.094357076780758,
"grad_norm": 15.307478904724121,
"learning_rate": 1.0586905128995787e-05,
"loss": 4.0266,
"step": 35000
},
{
"epoch": 8.325624421831638,
"grad_norm": 11.981130599975586,
"learning_rate": 9.302086545379794e-06,
"loss": 3.9985,
"step": 36000
},
{
"epoch": 8.556891766882517,
"grad_norm": 15.051200866699219,
"learning_rate": 8.017267961763799e-06,
"loss": 3.9549,
"step": 37000
},
{
"epoch": 8.788159111933394,
"grad_norm": 16.70930290222168,
"learning_rate": 6.732449378147807e-06,
"loss": 3.9317,
"step": 38000
},
{
"epoch": 9.0,
"eval_accuracy": 0.1507821552723059,
"eval_loss": 4.043083190917969,
"eval_runtime": 120.5834,
"eval_samples_per_second": 57.255,
"eval_steps_per_second": 3.583,
"step": 38916
},
{
"epoch": 9.019426456984274,
"grad_norm": 15.108479499816895,
"learning_rate": 5.447630794531813e-06,
"loss": 3.9349,
"step": 39000
},
{
"epoch": 9.250693802035153,
"grad_norm": 16.258913040161133,
"learning_rate": 4.162812210915819e-06,
"loss": 3.8783,
"step": 40000
},
{
"epoch": 9.481961147086032,
"grad_norm": 15.729056358337402,
"learning_rate": 2.877993627299825e-06,
"loss": 3.8587,
"step": 41000
},
{
"epoch": 9.71322849213691,
"grad_norm": 15.907846450805664,
"learning_rate": 1.5931750436838318e-06,
"loss": 3.8851,
"step": 42000
},
{
"epoch": 9.94449583718779,
"grad_norm": 12.73677921295166,
"learning_rate": 3.0835646006783846e-07,
"loss": 3.8568,
"step": 43000
},
{
"epoch": 10.0,
"eval_accuracy": 0.15773464658169178,
"eval_loss": 3.998060941696167,
"eval_runtime": 121.002,
"eval_samples_per_second": 57.057,
"eval_steps_per_second": 3.57,
"step": 43240
},
{
"epoch": 10.0,
"step": 43240,
"total_flos": 1.260392330300448e+19,
"train_loss": 4.86731394327536,
"train_runtime": 3865.9889,
"train_samples_per_second": 357.893,
"train_steps_per_second": 11.185
}
],
"logging_steps": 1000,
"max_steps": 43240,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 1.260392330300448e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}