{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 107.36187744140625,
"learning_rate": 4.5e-06,
"loss": 4.6121,
"step": 10
},
{
"epoch": 0.5,
"grad_norm": 81.77188110351562,
"learning_rate": 9.5e-06,
"loss": 4.1314,
"step": 20
},
{
"epoch": 0.75,
"grad_norm": 82.85482788085938,
"learning_rate": 1.45e-05,
"loss": 3.2993,
"step": 30
},
{
"epoch": 1.0,
"grad_norm": 63.60570526123047,
"learning_rate": 1.95e-05,
"loss": 2.3656,
"step": 40
},
{
"epoch": 1.25,
"grad_norm": 46.48249816894531,
"learning_rate": 1.95e-05,
"loss": 1.2769,
"step": 50
},
{
"epoch": 1.5,
"grad_norm": 44.416969299316406,
"learning_rate": 1.8944444444444447e-05,
"loss": 0.7619,
"step": 60
},
{
"epoch": 1.75,
"grad_norm": 36.2529296875,
"learning_rate": 1.838888888888889e-05,
"loss": 0.4734,
"step": 70
},
{
"epoch": 2.0,
"grad_norm": 25.273788452148438,
"learning_rate": 1.7833333333333334e-05,
"loss": 0.3357,
"step": 80
},
{
"epoch": 2.25,
"grad_norm": 23.972837448120117,
"learning_rate": 1.727777777777778e-05,
"loss": 0.1776,
"step": 90
},
{
"epoch": 2.5,
"grad_norm": 32.60515594482422,
"learning_rate": 1.6722222222222225e-05,
"loss": 0.173,
"step": 100
},
{
"epoch": 2.75,
"grad_norm": 11.92000961303711,
"learning_rate": 1.616666666666667e-05,
"loss": 0.1374,
"step": 110
},
{
"epoch": 3.0,
"grad_norm": 32.889869689941406,
"learning_rate": 1.5611111111111113e-05,
"loss": 0.1166,
"step": 120
},
{
"epoch": 3.25,
"grad_norm": 13.984763145446777,
"learning_rate": 1.5055555555555556e-05,
"loss": 0.1058,
"step": 130
},
{
"epoch": 3.5,
"grad_norm": 9.11774730682373,
"learning_rate": 1.45e-05,
"loss": 0.0893,
"step": 140
},
{
"epoch": 3.75,
"grad_norm": 10.404315948486328,
"learning_rate": 1.3944444444444446e-05,
"loss": 0.0994,
"step": 150
},
{
"epoch": 4.0,
"grad_norm": 39.227901458740234,
"learning_rate": 1.338888888888889e-05,
"loss": 0.0954,
"step": 160
},
{
"epoch": 4.25,
"grad_norm": 14.942120552062988,
"learning_rate": 1.2833333333333335e-05,
"loss": 0.0884,
"step": 170
},
{
"epoch": 4.5,
"grad_norm": 12.529537200927734,
"learning_rate": 1.227777777777778e-05,
"loss": 0.0724,
"step": 180
},
{
"epoch": 4.75,
"grad_norm": 7.07827091217041,
"learning_rate": 1.1722222222222224e-05,
"loss": 0.0805,
"step": 190
},
{
"epoch": 5.0,
"grad_norm": 26.651918411254883,
"learning_rate": 1.1166666666666668e-05,
"loss": 0.0919,
"step": 200
},
{
"epoch": 5.25,
"grad_norm": 13.577973365783691,
"learning_rate": 1.0611111111111111e-05,
"loss": 0.0732,
"step": 210
},
{
"epoch": 5.5,
"grad_norm": 12.209450721740723,
"learning_rate": 1.0055555555555557e-05,
"loss": 0.0648,
"step": 220
},
{
"epoch": 5.75,
"grad_norm": 33.70127487182617,
"learning_rate": 9.5e-06,
"loss": 0.0672,
"step": 230
},
{
"epoch": 6.0,
"grad_norm": 21.313919067382812,
"learning_rate": 8.944444444444446e-06,
"loss": 0.0742,
"step": 240
},
{
"epoch": 6.25,
"grad_norm": 21.87645721435547,
"learning_rate": 8.38888888888889e-06,
"loss": 0.0528,
"step": 250
},
{
"epoch": 6.5,
"grad_norm": 14.795717239379883,
"learning_rate": 7.833333333333333e-06,
"loss": 0.0646,
"step": 260
},
{
"epoch": 6.75,
"grad_norm": 17.657751083374023,
"learning_rate": 7.277777777777778e-06,
"loss": 0.0662,
"step": 270
},
{
"epoch": 7.0,
"grad_norm": 9.89352035522461,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.0852,
"step": 280
},
{
"epoch": 7.25,
"grad_norm": 10.50017261505127,
"learning_rate": 6.166666666666667e-06,
"loss": 0.0672,
"step": 290
},
{
"epoch": 7.5,
"grad_norm": 16.115074157714844,
"learning_rate": 5.611111111111112e-06,
"loss": 0.0807,
"step": 300
},
{
"epoch": 7.75,
"grad_norm": 15.634113311767578,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.056,
"step": 310
},
{
"epoch": 8.0,
"grad_norm": 4.4182329177856445,
"learning_rate": 4.5e-06,
"loss": 0.0576,
"step": 320
},
{
"epoch": 8.25,
"grad_norm": 22.3959903717041,
"learning_rate": 3.944444444444445e-06,
"loss": 0.0759,
"step": 330
},
{
"epoch": 8.5,
"grad_norm": 10.154661178588867,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.0346,
"step": 340
},
{
"epoch": 8.75,
"grad_norm": 4.109118938446045,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.0577,
"step": 350
},
{
"epoch": 9.0,
"grad_norm": 5.147540092468262,
"learning_rate": 2.277777777777778e-06,
"loss": 0.0587,
"step": 360
},
{
"epoch": 9.25,
"grad_norm": 8.975791931152344,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.0507,
"step": 370
},
{
"epoch": 9.5,
"grad_norm": 25.286869049072266,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.0496,
"step": 380
},
{
"epoch": 9.75,
"grad_norm": 13.948539733886719,
"learning_rate": 6.111111111111112e-07,
"loss": 0.0576,
"step": 390
},
{
"epoch": 10.0,
"grad_norm": 20.6357421875,
"learning_rate": 5.555555555555556e-08,
"loss": 0.08,
"step": 400
}
],
"logging_steps": 10,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}