{
"best_global_step": 425,
"best_metric": 0.9176470588235294,
"best_model_checkpoint": "./bert-samudra-model/checkpoint-425",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 425,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11764705882352941,
"grad_norm": 10.0350341796875,
"learning_rate": 1.9894117647058826e-05,
"loss": 3.8456,
"step": 10
},
{
"epoch": 0.23529411764705882,
"grad_norm": 10.431258201599121,
"learning_rate": 1.9776470588235296e-05,
"loss": 3.826,
"step": 20
},
{
"epoch": 0.35294117647058826,
"grad_norm": 11.249832153320312,
"learning_rate": 1.9658823529411765e-05,
"loss": 3.6472,
"step": 30
},
{
"epoch": 0.47058823529411764,
"grad_norm": 10.304410934448242,
"learning_rate": 1.9541176470588235e-05,
"loss": 3.5359,
"step": 40
},
{
"epoch": 0.5882352941176471,
"grad_norm": 11.847091674804688,
"learning_rate": 1.942352941176471e-05,
"loss": 3.3157,
"step": 50
},
{
"epoch": 0.7058823529411765,
"grad_norm": 9.621623039245605,
"learning_rate": 1.930588235294118e-05,
"loss": 3.1498,
"step": 60
},
{
"epoch": 0.8235294117647058,
"grad_norm": 11.151117324829102,
"learning_rate": 1.918823529411765e-05,
"loss": 2.9821,
"step": 70
},
{
"epoch": 0.9411764705882353,
"grad_norm": 9.83305549621582,
"learning_rate": 1.907058823529412e-05,
"loss": 2.9091,
"step": 80
},
{
"epoch": 1.0,
"eval_accuracy": 0.5294117647058824,
"eval_loss": 2.518275737762451,
"eval_runtime": 2.8699,
"eval_samples_per_second": 59.235,
"eval_steps_per_second": 7.666,
"step": 85
},
{
"epoch": 1.0588235294117647,
"grad_norm": 9.598007202148438,
"learning_rate": 1.895294117647059e-05,
"loss": 2.5816,
"step": 90
},
{
"epoch": 1.1764705882352942,
"grad_norm": 9.898895263671875,
"learning_rate": 1.883529411764706e-05,
"loss": 2.3487,
"step": 100
},
{
"epoch": 1.2941176470588236,
"grad_norm": 11.571386337280273,
"learning_rate": 1.871764705882353e-05,
"loss": 2.1136,
"step": 110
},
{
"epoch": 1.4117647058823528,
"grad_norm": 11.180049896240234,
"learning_rate": 1.86e-05,
"loss": 2.0731,
"step": 120
},
{
"epoch": 1.5294117647058822,
"grad_norm": 9.434500694274902,
"learning_rate": 1.848235294117647e-05,
"loss": 1.9983,
"step": 130
},
{
"epoch": 1.6470588235294117,
"grad_norm": 8.993762969970703,
"learning_rate": 1.836470588235294e-05,
"loss": 1.7445,
"step": 140
},
{
"epoch": 1.7647058823529411,
"grad_norm": 9.424057960510254,
"learning_rate": 1.8247058823529415e-05,
"loss": 1.7651,
"step": 150
},
{
"epoch": 1.8823529411764706,
"grad_norm": 8.946263313293457,
"learning_rate": 1.8129411764705885e-05,
"loss": 1.6938,
"step": 160
},
{
"epoch": 2.0,
"grad_norm": 9.5614013671875,
"learning_rate": 1.8011764705882354e-05,
"loss": 1.539,
"step": 170
},
{
"epoch": 2.0,
"eval_accuracy": 0.8117647058823529,
"eval_loss": 1.4311776161193848,
"eval_runtime": 2.9195,
"eval_samples_per_second": 58.23,
"eval_steps_per_second": 7.536,
"step": 170
},
{
"epoch": 2.1176470588235294,
"grad_norm": 8.067319869995117,
"learning_rate": 1.7894117647058824e-05,
"loss": 1.2424,
"step": 180
},
{
"epoch": 2.235294117647059,
"grad_norm": 9.209125518798828,
"learning_rate": 1.7776470588235294e-05,
"loss": 1.1468,
"step": 190
},
{
"epoch": 2.3529411764705883,
"grad_norm": 9.35853099822998,
"learning_rate": 1.7658823529411768e-05,
"loss": 1.0893,
"step": 200
},
{
"epoch": 2.4705882352941178,
"grad_norm": 7.949193477630615,
"learning_rate": 1.7541176470588238e-05,
"loss": 1.1078,
"step": 210
},
{
"epoch": 2.588235294117647,
"grad_norm": 10.602166175842285,
"learning_rate": 1.7423529411764707e-05,
"loss": 0.9902,
"step": 220
},
{
"epoch": 2.7058823529411766,
"grad_norm": 8.58541488647461,
"learning_rate": 1.7305882352941177e-05,
"loss": 0.9298,
"step": 230
},
{
"epoch": 2.8235294117647056,
"grad_norm": 6.725298881530762,
"learning_rate": 1.7188235294117647e-05,
"loss": 0.8838,
"step": 240
},
{
"epoch": 2.9411764705882355,
"grad_norm": 8.761615753173828,
"learning_rate": 1.7070588235294117e-05,
"loss": 0.8112,
"step": 250
},
{
"epoch": 3.0,
"eval_accuracy": 0.888235294117647,
"eval_loss": 0.8797827959060669,
"eval_runtime": 2.1675,
"eval_samples_per_second": 78.433,
"eval_steps_per_second": 10.15,
"step": 255
},
{
"epoch": 3.0588235294117645,
"grad_norm": 5.102208137512207,
"learning_rate": 1.695294117647059e-05,
"loss": 0.7609,
"step": 260
},
{
"epoch": 3.176470588235294,
"grad_norm": 8.779169082641602,
"learning_rate": 1.683529411764706e-05,
"loss": 0.6983,
"step": 270
},
{
"epoch": 3.2941176470588234,
"grad_norm": 4.434069633483887,
"learning_rate": 1.671764705882353e-05,
"loss": 0.5764,
"step": 280
},
{
"epoch": 3.411764705882353,
"grad_norm": 12.731746673583984,
"learning_rate": 1.66e-05,
"loss": 0.5199,
"step": 290
},
{
"epoch": 3.5294117647058822,
"grad_norm": 6.222368240356445,
"learning_rate": 1.648235294117647e-05,
"loss": 0.5333,
"step": 300
},
{
"epoch": 3.6470588235294117,
"grad_norm": 5.6505560874938965,
"learning_rate": 1.6364705882352944e-05,
"loss": 0.4524,
"step": 310
},
{
"epoch": 3.764705882352941,
"grad_norm": 5.23166036605835,
"learning_rate": 1.6247058823529413e-05,
"loss": 0.4974,
"step": 320
},
{
"epoch": 3.8823529411764706,
"grad_norm": 6.096327304840088,
"learning_rate": 1.6129411764705883e-05,
"loss": 0.4621,
"step": 330
},
{
"epoch": 4.0,
"grad_norm": 7.395042419433594,
"learning_rate": 1.6011764705882353e-05,
"loss": 0.4012,
"step": 340
},
{
"epoch": 4.0,
"eval_accuracy": 0.8941176470588236,
"eval_loss": 0.5824028253555298,
"eval_runtime": 2.3688,
"eval_samples_per_second": 71.766,
"eval_steps_per_second": 9.287,
"step": 340
},
{
"epoch": 4.117647058823529,
"grad_norm": 5.869823455810547,
"learning_rate": 1.5894117647058823e-05,
"loss": 0.3211,
"step": 350
},
{
"epoch": 4.235294117647059,
"grad_norm": 4.795381546020508,
"learning_rate": 1.5776470588235297e-05,
"loss": 0.3647,
"step": 360
},
{
"epoch": 4.352941176470588,
"grad_norm": 3.4636142253875732,
"learning_rate": 1.5658823529411766e-05,
"loss": 0.2564,
"step": 370
},
{
"epoch": 4.470588235294118,
"grad_norm": 10.156576156616211,
"learning_rate": 1.5541176470588236e-05,
"loss": 0.2745,
"step": 380
},
{
"epoch": 4.588235294117647,
"grad_norm": 2.828077554702759,
"learning_rate": 1.5423529411764706e-05,
"loss": 0.2418,
"step": 390
},
{
"epoch": 4.705882352941177,
"grad_norm": 3.717984199523926,
"learning_rate": 1.5305882352941176e-05,
"loss": 0.2605,
"step": 400
},
{
"epoch": 4.823529411764706,
"grad_norm": 1.5987720489501953,
"learning_rate": 1.5188235294117648e-05,
"loss": 0.2369,
"step": 410
},
{
"epoch": 4.9411764705882355,
"grad_norm": 3.3276009559631348,
"learning_rate": 1.507058823529412e-05,
"loss": 0.2366,
"step": 420
},
{
"epoch": 5.0,
"eval_accuracy": 0.9176470588235294,
"eval_loss": 0.4427616000175476,
"eval_runtime": 3.6872,
"eval_samples_per_second": 46.106,
"eval_steps_per_second": 5.967,
"step": 425
}
],
"logging_steps": 10,
"max_steps": 1700,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 29670808006140.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}