{
"best_global_step": 600,
"best_metric": 2.805308247942214,
"best_model_checkpoint": "./SALAMA_C7/checkpoint-600",
"epoch": 6.25,
"eval_steps": 200,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.20833333333333334,
"grad_norm": 2.567998170852661,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.1774,
"step": 20
},
{
"epoch": 0.4166666666666667,
"grad_norm": 3.8020272254943848,
"learning_rate": 3.900000000000001e-06,
"loss": 0.1662,
"step": 40
},
{
"epoch": 0.625,
"grad_norm": 2.032432794570923,
"learning_rate": 5.9e-06,
"loss": 0.1198,
"step": 60
},
{
"epoch": 0.8333333333333334,
"grad_norm": 1.15700101852417,
"learning_rate": 7.9e-06,
"loss": 0.109,
"step": 80
},
{
"epoch": 1.0416666666666667,
"grad_norm": 3.478109121322632,
"learning_rate": 9.9e-06,
"loss": 0.0838,
"step": 100
},
{
"epoch": 1.25,
"grad_norm": 1.5184258222579956,
"learning_rate": 9.779069767441862e-06,
"loss": 0.0669,
"step": 120
},
{
"epoch": 1.4583333333333333,
"grad_norm": 4.6623759269714355,
"learning_rate": 9.546511627906978e-06,
"loss": 0.0833,
"step": 140
},
{
"epoch": 1.6666666666666665,
"grad_norm": 3.8098835945129395,
"learning_rate": 9.313953488372095e-06,
"loss": 0.0915,
"step": 160
},
{
"epoch": 1.875,
"grad_norm": 2.154282331466675,
"learning_rate": 9.08139534883721e-06,
"loss": 0.0876,
"step": 180
},
{
"epoch": 2.0833333333333335,
"grad_norm": 2.825495719909668,
"learning_rate": 8.848837209302326e-06,
"loss": 0.1009,
"step": 200
},
{
"epoch": 2.0833333333333335,
"eval_loss": 0.08810210227966309,
"eval_runtime": 266.2349,
"eval_samples_per_second": 2.881,
"eval_steps_per_second": 0.361,
"eval_wer": 6.366537880060474,
"step": 200
},
{
"epoch": 2.2916666666666665,
"grad_norm": 2.1308014392852783,
"learning_rate": 8.616279069767443e-06,
"loss": 0.0388,
"step": 220
},
{
"epoch": 2.5,
"grad_norm": 3.1350250244140625,
"learning_rate": 8.383720930232559e-06,
"loss": 0.0438,
"step": 240
},
{
"epoch": 2.7083333333333335,
"grad_norm": 2.3798017501831055,
"learning_rate": 8.151162790697676e-06,
"loss": 0.038,
"step": 260
},
{
"epoch": 2.9166666666666665,
"grad_norm": 2.5995683670043945,
"learning_rate": 7.918604651162792e-06,
"loss": 0.0352,
"step": 280
},
{
"epoch": 3.125,
"grad_norm": 0.5472909212112427,
"learning_rate": 7.686046511627909e-06,
"loss": 0.0239,
"step": 300
},
{
"epoch": 3.3333333333333335,
"grad_norm": 1.4134125709533691,
"learning_rate": 7.453488372093024e-06,
"loss": 0.0168,
"step": 320
},
{
"epoch": 3.5416666666666665,
"grad_norm": 0.483494371175766,
"learning_rate": 7.22093023255814e-06,
"loss": 0.0138,
"step": 340
},
{
"epoch": 3.75,
"grad_norm": 1.7438899278640747,
"learning_rate": 6.988372093023257e-06,
"loss": 0.0169,
"step": 360
},
{
"epoch": 3.9583333333333335,
"grad_norm": 2.4661247730255127,
"learning_rate": 6.755813953488373e-06,
"loss": 0.0166,
"step": 380
},
{
"epoch": 4.166666666666667,
"grad_norm": 1.0261869430541992,
"learning_rate": 6.5232558139534885e-06,
"loss": 0.014,
"step": 400
},
{
"epoch": 4.166666666666667,
"eval_loss": 0.0717792809009552,
"eval_runtime": 266.6355,
"eval_samples_per_second": 2.877,
"eval_steps_per_second": 0.36,
"eval_wer": 3.376448849319671,
"step": 400
},
{
"epoch": 4.375,
"grad_norm": 3.9566986560821533,
"learning_rate": 6.290697674418606e-06,
"loss": 0.0093,
"step": 420
},
{
"epoch": 4.583333333333333,
"grad_norm": 0.1707661747932434,
"learning_rate": 6.0581395348837215e-06,
"loss": 0.0044,
"step": 440
},
{
"epoch": 4.791666666666667,
"grad_norm": 1.1894892454147339,
"learning_rate": 5.825581395348837e-06,
"loss": 0.0078,
"step": 460
},
{
"epoch": 5.0,
"grad_norm": 0.5132602453231812,
"learning_rate": 5.5930232558139544e-06,
"loss": 0.0067,
"step": 480
},
{
"epoch": 5.208333333333333,
"grad_norm": 0.06318831443786621,
"learning_rate": 5.36046511627907e-06,
"loss": 0.0038,
"step": 500
},
{
"epoch": 5.416666666666667,
"grad_norm": 0.09762592613697052,
"learning_rate": 5.127906976744187e-06,
"loss": 0.0034,
"step": 520
},
{
"epoch": 5.625,
"grad_norm": 0.9855716228485107,
"learning_rate": 4.895348837209303e-06,
"loss": 0.0039,
"step": 540
},
{
"epoch": 5.833333333333333,
"grad_norm": 0.20474685728549957,
"learning_rate": 4.66279069767442e-06,
"loss": 0.004,
"step": 560
},
{
"epoch": 6.041666666666667,
"grad_norm": 0.29130417108535767,
"learning_rate": 4.430232558139535e-06,
"loss": 0.004,
"step": 580
},
{
"epoch": 6.25,
"grad_norm": 0.03812503069639206,
"learning_rate": 4.197674418604652e-06,
"loss": 0.002,
"step": 600
},
{
"epoch": 6.25,
"eval_loss": 0.07055042684078217,
"eval_runtime": 268.98,
"eval_samples_per_second": 2.852,
"eval_steps_per_second": 0.357,
"eval_wer": 2.805308247942214,
"step": 600
}
],
"logging_steps": 20,
"max_steps": 960,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.53391363653632e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}