{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 108.46660614013672,
"learning_rate": 4.5e-06,
"loss": 6.8268,
"step": 10
},
{
"epoch": 0.5,
"grad_norm": 87.85498046875,
"learning_rate": 9.5e-06,
"loss": 6.2692,
"step": 20
},
{
"epoch": 0.75,
"grad_norm": 77.33071899414062,
"learning_rate": 1.45e-05,
"loss": 5.4279,
"step": 30
},
{
"epoch": 1.0,
"grad_norm": 84.33023071289062,
"learning_rate": 1.95e-05,
"loss": 4.4944,
"step": 40
},
{
"epoch": 1.25,
"grad_norm": 73.86274719238281,
"learning_rate": 1.95e-05,
"loss": 3.2618,
"step": 50
},
{
"epoch": 1.5,
"grad_norm": 59.88399124145508,
"learning_rate": 1.8944444444444447e-05,
"loss": 2.4952,
"step": 60
},
{
"epoch": 1.75,
"grad_norm": 68.54973602294922,
"learning_rate": 1.838888888888889e-05,
"loss": 1.9448,
"step": 70
},
{
"epoch": 2.0,
"grad_norm": 64.97733306884766,
"learning_rate": 1.7833333333333334e-05,
"loss": 1.6445,
"step": 80
},
{
"epoch": 2.25,
"grad_norm": 58.61252212524414,
"learning_rate": 1.727777777777778e-05,
"loss": 1.1984,
"step": 90
},
{
"epoch": 2.5,
"grad_norm": 39.56243133544922,
"learning_rate": 1.6722222222222225e-05,
"loss": 1.1865,
"step": 100
},
{
"epoch": 2.75,
"grad_norm": 48.185123443603516,
"learning_rate": 1.616666666666667e-05,
"loss": 1.0324,
"step": 110
},
{
"epoch": 3.0,
"grad_norm": 49.13726043701172,
"learning_rate": 1.5611111111111113e-05,
"loss": 0.8747,
"step": 120
},
{
"epoch": 3.25,
"grad_norm": 31.3487606048584,
"learning_rate": 1.5055555555555556e-05,
"loss": 0.7836,
"step": 130
},
{
"epoch": 3.5,
"grad_norm": 43.31613540649414,
"learning_rate": 1.45e-05,
"loss": 0.729,
"step": 140
},
{
"epoch": 3.75,
"grad_norm": 45.93892288208008,
"learning_rate": 1.3944444444444446e-05,
"loss": 0.7182,
"step": 150
},
{
"epoch": 4.0,
"grad_norm": 65.34429931640625,
"learning_rate": 1.338888888888889e-05,
"loss": 0.6853,
"step": 160
},
{
"epoch": 4.25,
"grad_norm": 40.68680191040039,
"learning_rate": 1.2833333333333335e-05,
"loss": 0.6082,
"step": 170
},
{
"epoch": 4.5,
"grad_norm": 23.430301666259766,
"learning_rate": 1.227777777777778e-05,
"loss": 0.6027,
"step": 180
},
{
"epoch": 4.75,
"grad_norm": 31.279895782470703,
"learning_rate": 1.1722222222222224e-05,
"loss": 0.5536,
"step": 190
},
{
"epoch": 5.0,
"grad_norm": 40.851078033447266,
"learning_rate": 1.1166666666666668e-05,
"loss": 0.5974,
"step": 200
},
{
"epoch": 5.25,
"grad_norm": 41.20182800292969,
"learning_rate": 1.0611111111111111e-05,
"loss": 0.4913,
"step": 210
},
{
"epoch": 5.5,
"grad_norm": 44.764102935791016,
"learning_rate": 1.0055555555555557e-05,
"loss": 0.5283,
"step": 220
},
{
"epoch": 5.75,
"grad_norm": 76.29002380371094,
"learning_rate": 9.5e-06,
"loss": 0.5435,
"step": 230
},
{
"epoch": 6.0,
"grad_norm": 42.7576904296875,
"learning_rate": 8.944444444444446e-06,
"loss": 0.5126,
"step": 240
},
{
"epoch": 6.25,
"grad_norm": 31.921890258789062,
"learning_rate": 8.38888888888889e-06,
"loss": 0.4732,
"step": 250
},
{
"epoch": 6.5,
"grad_norm": 344.25213623046875,
"learning_rate": 7.833333333333333e-06,
"loss": 0.507,
"step": 260
},
{
"epoch": 6.75,
"grad_norm": 39.58772659301758,
"learning_rate": 7.277777777777778e-06,
"loss": 0.4971,
"step": 270
},
{
"epoch": 7.0,
"grad_norm": 32.24217987060547,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.4867,
"step": 280
},
{
"epoch": 7.25,
"grad_norm": 36.557579040527344,
"learning_rate": 6.166666666666667e-06,
"loss": 0.417,
"step": 290
},
{
"epoch": 7.5,
"grad_norm": 32.9907341003418,
"learning_rate": 5.611111111111112e-06,
"loss": 0.4371,
"step": 300
},
{
"epoch": 7.75,
"grad_norm": 48.47484588623047,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.4747,
"step": 310
},
{
"epoch": 8.0,
"grad_norm": 39.65238952636719,
"learning_rate": 4.5e-06,
"loss": 0.4309,
"step": 320
},
{
"epoch": 8.25,
"grad_norm": 30.107986450195312,
"learning_rate": 3.944444444444445e-06,
"loss": 0.4543,
"step": 330
},
{
"epoch": 8.5,
"grad_norm": 34.437103271484375,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.4244,
"step": 340
},
{
"epoch": 8.75,
"grad_norm": 43.94343566894531,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.4722,
"step": 350
},
{
"epoch": 9.0,
"grad_norm": 36.12664031982422,
"learning_rate": 2.277777777777778e-06,
"loss": 0.438,
"step": 360
},
{
"epoch": 9.25,
"grad_norm": 43.92878341674805,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.4211,
"step": 370
},
{
"epoch": 9.5,
"grad_norm": 35.80113983154297,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.419,
"step": 380
},
{
"epoch": 9.75,
"grad_norm": 33.334144592285156,
"learning_rate": 6.111111111111112e-07,
"loss": 0.4148,
"step": 390
},
{
"epoch": 10.0,
"grad_norm": 33.29014205932617,
"learning_rate": 5.555555555555556e-08,
"loss": 0.4525,
"step": 400
}
],
"logging_steps": 10,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}