{
"best_global_step": 200,
"best_metric": 10.44776119402985,
"best_model_checkpoint": "./SALAMA_C7/checkpoint-200",
"epoch": 3.0,
"eval_steps": 200,
"global_step": 288,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.20833333333333334,
"grad_norm": 11.919928550720215,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.6563,
"step": 20
},
{
"epoch": 0.4166666666666667,
"grad_norm": 11.068168640136719,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.5814,
"step": 40
},
{
"epoch": 0.625,
"grad_norm": 7.94372034072876,
"learning_rate": 5.8e-06,
"loss": 0.5414,
"step": 60
},
{
"epoch": 0.8333333333333334,
"grad_norm": 6.410305500030518,
"learning_rate": 7.800000000000002e-06,
"loss": 0.4745,
"step": 80
},
{
"epoch": 1.0416666666666667,
"grad_norm": 4.956066131591797,
"learning_rate": 9.800000000000001e-06,
"loss": 0.3529,
"step": 100
},
{
"epoch": 1.25,
"grad_norm": 3.679781436920166,
"learning_rate": 9.042553191489362e-06,
"loss": 0.2865,
"step": 120
},
{
"epoch": 1.4583333333333333,
"grad_norm": 3.3053739070892334,
"learning_rate": 7.97872340425532e-06,
"loss": 0.263,
"step": 140
},
{
"epoch": 1.6666666666666665,
"grad_norm": 5.734177589416504,
"learning_rate": 6.914893617021278e-06,
"loss": 0.2875,
"step": 160
},
{
"epoch": 1.875,
"grad_norm": 7.049015522003174,
"learning_rate": 5.851063829787235e-06,
"loss": 0.2821,
"step": 180
},
{
"epoch": 2.0833333333333335,
"grad_norm": 2.211012363433838,
"learning_rate": 4.787234042553192e-06,
"loss": 0.204,
"step": 200
},
{
"epoch": 2.0833333333333335,
"eval_loss": 0.18036405742168427,
"eval_runtime": 274.0379,
"eval_samples_per_second": 2.799,
"eval_steps_per_second": 0.35,
"eval_wer": 10.44776119402985,
"step": 200
},
{
"epoch": 2.2916666666666665,
"grad_norm": 4.120218276977539,
"learning_rate": 3.723404255319149e-06,
"loss": 0.1378,
"step": 220
},
{
"epoch": 2.5,
"grad_norm": 2.3007442951202393,
"learning_rate": 2.6595744680851065e-06,
"loss": 0.1111,
"step": 240
},
{
"epoch": 2.7083333333333335,
"grad_norm": 4.804255485534668,
"learning_rate": 1.595744680851064e-06,
"loss": 0.1654,
"step": 260
},
{
"epoch": 2.9166666666666665,
"grad_norm": 3.7491042613983154,
"learning_rate": 5.319148936170213e-07,
"loss": 0.1346,
"step": 280
}
],
"logging_steps": 20,
"max_steps": 288,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.65614002454528e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}