{
"best_metric": 1.301220178604126,
"best_model_checkpoint": "roberta-base-CD_baseline/checkpoint-625",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 625,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 4.637064456939697,
"learning_rate": 1.968e-05,
"loss": 2.2753,
"step": 10
},
{
"epoch": 0.16,
"grad_norm": 15.68393325805664,
"learning_rate": 1.936e-05,
"loss": 2.0418,
"step": 20
},
{
"epoch": 0.24,
"grad_norm": 12.480018615722656,
"learning_rate": 1.904e-05,
"loss": 1.9195,
"step": 30
},
{
"epoch": 0.32,
"grad_norm": 41.00396728515625,
"learning_rate": 1.8720000000000004e-05,
"loss": 1.9629,
"step": 40
},
{
"epoch": 0.4,
"grad_norm": 4.204047203063965,
"learning_rate": 1.8400000000000003e-05,
"loss": 1.7429,
"step": 50
},
{
"epoch": 0.48,
"grad_norm": 8.693258285522461,
"learning_rate": 1.8080000000000003e-05,
"loss": 1.9048,
"step": 60
},
{
"epoch": 0.56,
"grad_norm": 3.7029831409454346,
"learning_rate": 1.7760000000000003e-05,
"loss": 1.8083,
"step": 70
},
{
"epoch": 0.64,
"grad_norm": 4.148443698883057,
"learning_rate": 1.7440000000000002e-05,
"loss": 1.6655,
"step": 80
},
{
"epoch": 0.72,
"grad_norm": 20.234933853149414,
"learning_rate": 1.7120000000000002e-05,
"loss": 1.5108,
"step": 90
},
{
"epoch": 0.8,
"grad_norm": 8.840304374694824,
"learning_rate": 1.6800000000000002e-05,
"loss": 1.7,
"step": 100
},
{
"epoch": 0.88,
"grad_norm": 7.2302350997924805,
"learning_rate": 1.648e-05,
"loss": 1.6971,
"step": 110
},
{
"epoch": 0.96,
"grad_norm": 3.1535422801971436,
"learning_rate": 1.616e-05,
"loss": 1.6077,
"step": 120
},
{
"epoch": 1.0,
"eval_accuracy": 0.41304347826086957,
"eval_f1": 0.3267868906455863,
"eval_loss": 1.6377873420715332,
"eval_precision": 0.29971778816474115,
"eval_recall": 0.41304347826086957,
"eval_runtime": 1.3051,
"eval_samples_per_second": 176.233,
"eval_steps_per_second": 11.493,
"step": 125
},
{
"epoch": 1.04,
"grad_norm": 9.82644271850586,
"learning_rate": 1.584e-05,
"loss": 1.7461,
"step": 130
},
{
"epoch": 1.12,
"grad_norm": 7.467548370361328,
"learning_rate": 1.552e-05,
"loss": 1.6311,
"step": 140
},
{
"epoch": 1.2,
"grad_norm": 5.879318714141846,
"learning_rate": 1.5200000000000002e-05,
"loss": 1.4653,
"step": 150
},
{
"epoch": 1.28,
"grad_norm": 12.443910598754883,
"learning_rate": 1.4880000000000002e-05,
"loss": 1.628,
"step": 160
},
{
"epoch": 1.36,
"grad_norm": 5.35117244720459,
"learning_rate": 1.4560000000000001e-05,
"loss": 1.5741,
"step": 170
},
{
"epoch": 1.44,
"grad_norm": 7.006358623504639,
"learning_rate": 1.4240000000000001e-05,
"loss": 1.8264,
"step": 180
},
{
"epoch": 1.52,
"grad_norm": 5.610796928405762,
"learning_rate": 1.392e-05,
"loss": 1.5304,
"step": 190
},
{
"epoch": 1.6,
"grad_norm": 23.96417808532715,
"learning_rate": 1.3600000000000002e-05,
"loss": 1.4833,
"step": 200
},
{
"epoch": 1.68,
"grad_norm": 6.071532249450684,
"learning_rate": 1.3280000000000002e-05,
"loss": 1.4721,
"step": 210
},
{
"epoch": 1.76,
"grad_norm": 27.290363311767578,
"learning_rate": 1.2960000000000001e-05,
"loss": 1.6517,
"step": 220
},
{
"epoch": 1.84,
"grad_norm": 7.392124176025391,
"learning_rate": 1.2640000000000001e-05,
"loss": 1.5183,
"step": 230
},
{
"epoch": 1.92,
"grad_norm": 8.155671119689941,
"learning_rate": 1.232e-05,
"loss": 1.4147,
"step": 240
},
{
"epoch": 2.0,
"grad_norm": 7.289750099182129,
"learning_rate": 1.2e-05,
"loss": 1.6016,
"step": 250
},
{
"epoch": 2.0,
"eval_accuracy": 0.48695652173913045,
"eval_f1": 0.4109191960797593,
"eval_loss": 1.460945963859558,
"eval_precision": 0.39039609511101225,
"eval_recall": 0.48695652173913045,
"eval_runtime": 1.2886,
"eval_samples_per_second": 178.493,
"eval_steps_per_second": 11.641,
"step": 250
},
{
"epoch": 2.08,
"grad_norm": 15.545705795288086,
"learning_rate": 1.168e-05,
"loss": 1.3776,
"step": 260
},
{
"epoch": 2.16,
"grad_norm": 7.504268646240234,
"learning_rate": 1.136e-05,
"loss": 1.3455,
"step": 270
},
{
"epoch": 2.24,
"grad_norm": 8.06009292602539,
"learning_rate": 1.1040000000000001e-05,
"loss": 1.4793,
"step": 280
},
{
"epoch": 2.32,
"grad_norm": 18.173667907714844,
"learning_rate": 1.072e-05,
"loss": 1.4226,
"step": 290
},
{
"epoch": 2.4,
"grad_norm": 11.790044784545898,
"learning_rate": 1.04e-05,
"loss": 1.386,
"step": 300
},
{
"epoch": 2.48,
"grad_norm": 7.876099109649658,
"learning_rate": 1.008e-05,
"loss": 1.3522,
"step": 310
},
{
"epoch": 2.56,
"grad_norm": 10.516275405883789,
"learning_rate": 9.760000000000001e-06,
"loss": 1.4296,
"step": 320
},
{
"epoch": 2.64,
"grad_norm": 8.990633010864258,
"learning_rate": 9.440000000000001e-06,
"loss": 1.4941,
"step": 330
},
{
"epoch": 2.72,
"grad_norm": 8.365500450134277,
"learning_rate": 9.12e-06,
"loss": 1.4153,
"step": 340
},
{
"epoch": 2.8,
"grad_norm": 9.450441360473633,
"learning_rate": 8.8e-06,
"loss": 1.522,
"step": 350
},
{
"epoch": 2.88,
"grad_norm": 18.165035247802734,
"learning_rate": 8.48e-06,
"loss": 1.3895,
"step": 360
},
{
"epoch": 2.96,
"grad_norm": 10.70251750946045,
"learning_rate": 8.16e-06,
"loss": 1.2479,
"step": 370
},
{
"epoch": 3.0,
"eval_accuracy": 0.5043478260869565,
"eval_f1": 0.44849074152912793,
"eval_loss": 1.4184553623199463,
"eval_precision": 0.4235901359719885,
"eval_recall": 0.5043478260869565,
"eval_runtime": 1.303,
"eval_samples_per_second": 176.51,
"eval_steps_per_second": 11.511,
"step": 375
},
{
"epoch": 3.04,
"grad_norm": 23.491443634033203,
"learning_rate": 7.840000000000001e-06,
"loss": 1.2631,
"step": 380
},
{
"epoch": 3.12,
"grad_norm": 8.99567985534668,
"learning_rate": 7.520000000000001e-06,
"loss": 1.0625,
"step": 390
},
{
"epoch": 3.2,
"grad_norm": 12.7035493850708,
"learning_rate": 7.2000000000000005e-06,
"loss": 1.3823,
"step": 400
},
{
"epoch": 3.28,
"grad_norm": 15.28470230102539,
"learning_rate": 6.88e-06,
"loss": 1.2633,
"step": 410
},
{
"epoch": 3.36,
"grad_norm": 10.78918170928955,
"learning_rate": 6.560000000000001e-06,
"loss": 1.1007,
"step": 420
},
{
"epoch": 3.44,
"grad_norm": 14.376765251159668,
"learning_rate": 6.24e-06,
"loss": 1.2674,
"step": 430
},
{
"epoch": 3.52,
"grad_norm": 11.458234786987305,
"learning_rate": 5.92e-06,
"loss": 1.0922,
"step": 440
},
{
"epoch": 3.6,
"grad_norm": 15.360867500305176,
"learning_rate": 5.600000000000001e-06,
"loss": 1.2077,
"step": 450
},
{
"epoch": 3.68,
"grad_norm": 20.423187255859375,
"learning_rate": 5.28e-06,
"loss": 1.302,
"step": 460
},
{
"epoch": 3.76,
"grad_norm": 13.024535179138184,
"learning_rate": 4.960000000000001e-06,
"loss": 1.1218,
"step": 470
},
{
"epoch": 3.84,
"grad_norm": 14.865490913391113,
"learning_rate": 4.6400000000000005e-06,
"loss": 1.1858,
"step": 480
},
{
"epoch": 3.92,
"grad_norm": 17.874656677246094,
"learning_rate": 4.32e-06,
"loss": 1.1319,
"step": 490
},
{
"epoch": 4.0,
"grad_norm": 16.873672485351562,
"learning_rate": 4.000000000000001e-06,
"loss": 1.1542,
"step": 500
},
{
"epoch": 4.0,
"eval_accuracy": 0.5434782608695652,
"eval_f1": 0.5141449277787993,
"eval_loss": 1.3071645498275757,
"eval_precision": 0.5397252083665127,
"eval_recall": 0.5434782608695652,
"eval_runtime": 2.7726,
"eval_samples_per_second": 82.954,
"eval_steps_per_second": 5.41,
"step": 500
},
{
"epoch": 4.08,
"grad_norm": 16.543500900268555,
"learning_rate": 3.6800000000000003e-06,
"loss": 1.0007,
"step": 510
},
{
"epoch": 4.16,
"grad_norm": 13.177757263183594,
"learning_rate": 3.3600000000000004e-06,
"loss": 1.0294,
"step": 520
},
{
"epoch": 4.24,
"grad_norm": 10.053601264953613,
"learning_rate": 3.04e-06,
"loss": 1.1163,
"step": 530
},
{
"epoch": 4.32,
"grad_norm": 20.3472957611084,
"learning_rate": 2.7200000000000002e-06,
"loss": 1.1222,
"step": 540
},
{
"epoch": 4.4,
"grad_norm": 10.218711853027344,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.964,
"step": 550
},
{
"epoch": 4.48,
"grad_norm": 10.360238075256348,
"learning_rate": 2.08e-06,
"loss": 1.0094,
"step": 560
},
{
"epoch": 4.56,
"grad_norm": 16.11949348449707,
"learning_rate": 1.76e-06,
"loss": 1.0102,
"step": 570
},
{
"epoch": 4.64,
"grad_norm": 14.713994979858398,
"learning_rate": 1.44e-06,
"loss": 1.0562,
"step": 580
},
{
"epoch": 4.72,
"grad_norm": 12.219269752502441,
"learning_rate": 1.12e-06,
"loss": 0.9808,
"step": 590
},
{
"epoch": 4.8,
"grad_norm": 27.00701141357422,
"learning_rate": 8.000000000000001e-07,
"loss": 1.1036,
"step": 600
},
{
"epoch": 4.88,
"grad_norm": 18.196247100830078,
"learning_rate": 4.800000000000001e-07,
"loss": 1.1907,
"step": 610
},
{
"epoch": 4.96,
"grad_norm": 18.10931968688965,
"learning_rate": 1.6e-07,
"loss": 1.1302,
"step": 620
},
{
"epoch": 5.0,
"eval_accuracy": 0.5434782608695652,
"eval_f1": 0.5062253924435693,
"eval_loss": 1.301220178604126,
"eval_precision": 0.511256656273167,
"eval_recall": 0.5434782608695652,
"eval_runtime": 2.7886,
"eval_samples_per_second": 82.478,
"eval_steps_per_second": 5.379,
"step": 625
},
{
"epoch": 5.0,
"step": 625,
"total_flos": 1434186246250944.0,
"train_loss": 1.4087707061767578,
"train_runtime": 222.2422,
"train_samples_per_second": 44.906,
"train_steps_per_second": 2.812
}
],
"logging_steps": 10,
"max_steps": 625,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 1434186246250944.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}