{
"best_global_step": 600,
"best_metric": 44.61419121291129,
"best_model_checkpoint": "./HAUSA_B/checkpoint-600",
"epoch": 0.8124576844955992,
"eval_steps": 200,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.027081922816519974,
"grad_norm": 106.59215545654297,
"learning_rate": 8.000000000000001e-07,
"loss": 6.1915,
"step": 20
},
{
"epoch": 0.05416384563303995,
"grad_norm": 30.563518524169922,
"learning_rate": 1.8000000000000001e-06,
"loss": 4.5886,
"step": 40
},
{
"epoch": 0.08124576844955991,
"grad_norm": 16.528457641601562,
"learning_rate": 2.8000000000000003e-06,
"loss": 3.0338,
"step": 60
},
{
"epoch": 0.1083276912660799,
"grad_norm": 13.852096557617188,
"learning_rate": 3.8000000000000005e-06,
"loss": 2.3188,
"step": 80
},
{
"epoch": 0.13540961408259986,
"grad_norm": 13.284646987915039,
"learning_rate": 4.800000000000001e-06,
"loss": 1.9725,
"step": 100
},
{
"epoch": 0.16249153689911983,
"grad_norm": 13.212055206298828,
"learning_rate": 5.8e-06,
"loss": 1.7839,
"step": 120
},
{
"epoch": 0.1895734597156398,
"grad_norm": 12.006990432739258,
"learning_rate": 6.800000000000001e-06,
"loss": 1.5991,
"step": 140
},
{
"epoch": 0.2166553825321598,
"grad_norm": 12.490514755249023,
"learning_rate": 7.800000000000002e-06,
"loss": 1.4411,
"step": 160
},
{
"epoch": 0.24373730534867977,
"grad_norm": 11.587646484375,
"learning_rate": 8.8e-06,
"loss": 1.3487,
"step": 180
},
{
"epoch": 0.2708192281651997,
"grad_norm": 11.574933052062988,
"learning_rate": 9.800000000000001e-06,
"loss": 1.2119,
"step": 200
},
{
"epoch": 0.2708192281651997,
"eval_loss": 1.1157827377319336,
"eval_runtime": 1601.2991,
"eval_samples_per_second": 3.688,
"eval_steps_per_second": 0.462,
"eval_wer": 64.22461160550755,
"step": 200
},
{
"epoch": 0.2979011509817197,
"grad_norm": 8.368565559387207,
"learning_rate": 9.920556107249256e-06,
"loss": 0.9452,
"step": 220
},
{
"epoch": 0.32498307379823965,
"grad_norm": 7.2616682052612305,
"learning_rate": 9.821251241310825e-06,
"loss": 0.9298,
"step": 240
},
{
"epoch": 0.35206499661475965,
"grad_norm": 7.266394138336182,
"learning_rate": 9.721946375372395e-06,
"loss": 0.8567,
"step": 260
},
{
"epoch": 0.3791469194312796,
"grad_norm": 8.120067596435547,
"learning_rate": 9.622641509433963e-06,
"loss": 0.8251,
"step": 280
},
{
"epoch": 0.4062288422477996,
"grad_norm": 7.483547687530518,
"learning_rate": 9.523336643495532e-06,
"loss": 0.7774,
"step": 300
},
{
"epoch": 0.4333107650643196,
"grad_norm": 7.521896839141846,
"learning_rate": 9.4240317775571e-06,
"loss": 0.7266,
"step": 320
},
{
"epoch": 0.46039268788083954,
"grad_norm": 7.073266506195068,
"learning_rate": 9.32472691161867e-06,
"loss": 0.7248,
"step": 340
},
{
"epoch": 0.48747461069735953,
"grad_norm": 6.335423469543457,
"learning_rate": 9.22542204568024e-06,
"loss": 0.7151,
"step": 360
},
{
"epoch": 0.5145565335138795,
"grad_norm": 6.936922550201416,
"learning_rate": 9.126117179741808e-06,
"loss": 0.6881,
"step": 380
},
{
"epoch": 0.5416384563303994,
"grad_norm": 7.596807479858398,
"learning_rate": 9.026812313803377e-06,
"loss": 0.6995,
"step": 400
},
{
"epoch": 0.5416384563303994,
"eval_loss": 0.6352065801620483,
"eval_runtime": 1597.3635,
"eval_samples_per_second": 3.697,
"eval_steps_per_second": 0.463,
"eval_wer": 51.63653601672089,
"step": 400
},
{
"epoch": 0.5687203791469194,
"grad_norm": 6.981812477111816,
"learning_rate": 8.927507447864945e-06,
"loss": 0.6795,
"step": 420
},
{
"epoch": 0.5958023019634394,
"grad_norm": 6.481506824493408,
"learning_rate": 8.828202581926516e-06,
"loss": 0.6643,
"step": 440
},
{
"epoch": 0.6228842247799594,
"grad_norm": 6.537086009979248,
"learning_rate": 8.728897715988084e-06,
"loss": 0.6459,
"step": 460
},
{
"epoch": 0.6499661475964793,
"grad_norm": 6.739567756652832,
"learning_rate": 8.629592850049653e-06,
"loss": 0.6736,
"step": 480
},
{
"epoch": 0.6770480704129993,
"grad_norm": 7.422546863555908,
"learning_rate": 8.530287984111221e-06,
"loss": 0.6591,
"step": 500
},
{
"epoch": 0.7041299932295193,
"grad_norm": 5.7051215171813965,
"learning_rate": 8.430983118172792e-06,
"loss": 0.6442,
"step": 520
},
{
"epoch": 0.7312119160460393,
"grad_norm": 7.166143417358398,
"learning_rate": 8.33167825223436e-06,
"loss": 0.6326,
"step": 540
},
{
"epoch": 0.7582938388625592,
"grad_norm": 7.759460926055908,
"learning_rate": 8.232373386295929e-06,
"loss": 0.6328,
"step": 560
},
{
"epoch": 0.7853757616790792,
"grad_norm": 5.876537799835205,
"learning_rate": 8.133068520357497e-06,
"loss": 0.5945,
"step": 580
},
{
"epoch": 0.8124576844955992,
"grad_norm": 6.475106716156006,
"learning_rate": 8.033763654419066e-06,
"loss": 0.6038,
"step": 600
},
{
"epoch": 0.8124576844955992,
"eval_loss": 0.5318673849105835,
"eval_runtime": 1614.0202,
"eval_samples_per_second": 3.659,
"eval_steps_per_second": 0.458,
"eval_wer": 44.61419121291129,
"step": 600
}
],
"logging_steps": 20,
"max_steps": 2214,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.540839686144e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}