{
"best_global_step": 552,
"best_metric": 0.758873745694174,
"best_model_checkpoint": "abcd\\checkpoint-552",
"epoch": 5.157608695652174,
"eval_steps": 500,
"global_step": 552,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018115942028985508,
"grad_norm": 13.317688941955566,
"learning_rate": 8.035714285714286e-06,
"loss": 0.8905,
"step": 10
},
{
"epoch": 0.036231884057971016,
"grad_norm": 12.00970458984375,
"learning_rate": 1.6964285714285715e-05,
"loss": 0.7819,
"step": 20
},
{
"epoch": 0.05434782608695652,
"grad_norm": 14.536447525024414,
"learning_rate": 2.5892857142857148e-05,
"loss": 0.9766,
"step": 30
},
{
"epoch": 0.07246376811594203,
"grad_norm": 15.37159252166748,
"learning_rate": 3.4821428571428574e-05,
"loss": 0.8572,
"step": 40
},
{
"epoch": 0.09057971014492754,
"grad_norm": 20.875459671020508,
"learning_rate": 4.375e-05,
"loss": 1.0996,
"step": 50
},
{
"epoch": 0.10869565217391304,
"grad_norm": 13.146461486816406,
"learning_rate": 4.969758064516129e-05,
"loss": 0.9429,
"step": 60
},
{
"epoch": 0.12681159420289856,
"grad_norm": 20.685447692871094,
"learning_rate": 4.868951612903226e-05,
"loss": 1.1377,
"step": 70
},
{
"epoch": 0.14492753623188406,
"grad_norm": 16.825111389160156,
"learning_rate": 4.768145161290323e-05,
"loss": 1.0243,
"step": 80
},
{
"epoch": 0.16304347826086957,
"grad_norm": 19.70474624633789,
"learning_rate": 4.6673387096774196e-05,
"loss": 1.1325,
"step": 90
},
{
"epoch": 0.16847826086956522,
"eval_accuracy": 0.556836902800659,
"eval_loss": 1.0675878524780273,
"eval_runtime": 1853.5174,
"eval_samples_per_second": 7.205,
"eval_steps_per_second": 1.801,
"step": 93
},
{
"epoch": 1.0126811594202898,
"grad_norm": 11.140310287475586,
"learning_rate": 4.566532258064516e-05,
"loss": 0.8952,
"step": 100
},
{
"epoch": 1.0307971014492754,
"grad_norm": 6.155562877655029,
"learning_rate": 4.465725806451613e-05,
"loss": 0.8204,
"step": 110
},
{
"epoch": 1.048913043478261,
"grad_norm": 9.62833023071289,
"learning_rate": 4.36491935483871e-05,
"loss": 0.8178,
"step": 120
},
{
"epoch": 1.0670289855072463,
"grad_norm": 14.596573829650879,
"learning_rate": 4.2641129032258064e-05,
"loss": 0.9171,
"step": 130
},
{
"epoch": 1.085144927536232,
"grad_norm": 14.76526927947998,
"learning_rate": 4.163306451612903e-05,
"loss": 0.781,
"step": 140
},
{
"epoch": 1.1032608695652173,
"grad_norm": 21.03084945678711,
"learning_rate": 4.0625000000000005e-05,
"loss": 0.7955,
"step": 150
},
{
"epoch": 1.1213768115942029,
"grad_norm": 18.31418800354004,
"learning_rate": 3.961693548387097e-05,
"loss": 1.1034,
"step": 160
},
{
"epoch": 1.1394927536231885,
"grad_norm": 17.097795486450195,
"learning_rate": 3.860887096774194e-05,
"loss": 0.8052,
"step": 170
},
{
"epoch": 1.1576086956521738,
"grad_norm": 15.764641761779785,
"learning_rate": 3.7600806451612906e-05,
"loss": 0.738,
"step": 180
},
{
"epoch": 1.1684782608695652,
"eval_accuracy": 0.549947581249064,
"eval_loss": 1.2091021537780762,
"eval_runtime": 1866.8421,
"eval_samples_per_second": 7.153,
"eval_steps_per_second": 1.789,
"step": 186
},
{
"epoch": 2.0072463768115942,
"grad_norm": 6.231867790222168,
"learning_rate": 3.659274193548387e-05,
"loss": 0.4987,
"step": 190
},
{
"epoch": 2.0253623188405796,
"grad_norm": 7.453345775604248,
"learning_rate": 3.558467741935484e-05,
"loss": 0.3465,
"step": 200
},
{
"epoch": 2.0434782608695654,
"grad_norm": 21.0439453125,
"learning_rate": 3.457661290322581e-05,
"loss": 0.3425,
"step": 210
},
{
"epoch": 2.0615942028985508,
"grad_norm": 18.279014587402344,
"learning_rate": 3.3568548387096774e-05,
"loss": 0.6557,
"step": 220
},
{
"epoch": 2.079710144927536,
"grad_norm": 27.888294219970703,
"learning_rate": 3.256048387096775e-05,
"loss": 0.8808,
"step": 230
},
{
"epoch": 2.097826086956522,
"grad_norm": 16.571508407592773,
"learning_rate": 3.1552419354838715e-05,
"loss": 0.49,
"step": 240
},
{
"epoch": 2.1159420289855073,
"grad_norm": 23.84330177307129,
"learning_rate": 3.0544354838709676e-05,
"loss": 0.6079,
"step": 250
},
{
"epoch": 2.1340579710144927,
"grad_norm": 13.54464340209961,
"learning_rate": 2.9536290322580646e-05,
"loss": 0.4746,
"step": 260
},
{
"epoch": 2.1521739130434785,
"grad_norm": 22.121822357177734,
"learning_rate": 2.8528225806451613e-05,
"loss": 0.6397,
"step": 270
},
{
"epoch": 2.1684782608695654,
"eval_accuracy": 0.611577055563876,
"eval_loss": 1.2484452724456787,
"eval_runtime": 1864.5566,
"eval_samples_per_second": 7.162,
"eval_steps_per_second": 1.791,
"step": 279
},
{
"epoch": 3.0018115942028984,
"grad_norm": 16.256120681762695,
"learning_rate": 2.7520161290322584e-05,
"loss": 0.473,
"step": 280
},
{
"epoch": 3.0199275362318843,
"grad_norm": 7.968146324157715,
"learning_rate": 2.651209677419355e-05,
"loss": 0.3852,
"step": 290
},
{
"epoch": 3.0380434782608696,
"grad_norm": 21.890714645385742,
"learning_rate": 2.550403225806452e-05,
"loss": 0.4942,
"step": 300
},
{
"epoch": 3.056159420289855,
"grad_norm": 14.216273307800293,
"learning_rate": 2.4495967741935485e-05,
"loss": 0.3557,
"step": 310
},
{
"epoch": 3.074275362318841,
"grad_norm": 2.547912836074829,
"learning_rate": 2.3487903225806455e-05,
"loss": 0.3706,
"step": 320
},
{
"epoch": 3.092391304347826,
"grad_norm": 16.752574920654297,
"learning_rate": 2.247983870967742e-05,
"loss": 0.444,
"step": 330
},
{
"epoch": 3.1105072463768115,
"grad_norm": 15.96899127960205,
"learning_rate": 2.147177419354839e-05,
"loss": 0.4921,
"step": 340
},
{
"epoch": 3.128623188405797,
"grad_norm": 11.133148193359375,
"learning_rate": 2.0463709677419356e-05,
"loss": 0.3874,
"step": 350
},
{
"epoch": 3.1467391304347827,
"grad_norm": 6.035247802734375,
"learning_rate": 1.9455645161290323e-05,
"loss": 0.5042,
"step": 360
},
{
"epoch": 3.164855072463768,
"grad_norm": 4.174564361572266,
"learning_rate": 1.844758064516129e-05,
"loss": 0.4395,
"step": 370
},
{
"epoch": 3.1684782608695654,
"eval_accuracy": 0.642728770405871,
"eval_loss": 1.0139626264572144,
"eval_runtime": 1859.3799,
"eval_samples_per_second": 7.182,
"eval_steps_per_second": 1.796,
"step": 372
},
{
"epoch": 4.0144927536231885,
"grad_norm": 3.096120595932007,
"learning_rate": 1.743951612903226e-05,
"loss": 0.2476,
"step": 380
},
{
"epoch": 4.032608695652174,
"grad_norm": 17.17482566833496,
"learning_rate": 1.6431451612903225e-05,
"loss": 0.324,
"step": 390
},
{
"epoch": 4.050724637681159,
"grad_norm": 4.938539505004883,
"learning_rate": 1.5423387096774195e-05,
"loss": 0.2958,
"step": 400
},
{
"epoch": 4.068840579710145,
"grad_norm": 4.778896808624268,
"learning_rate": 1.4415322580645164e-05,
"loss": 0.284,
"step": 410
},
{
"epoch": 4.086956521739131,
"grad_norm": 20.58946418762207,
"learning_rate": 1.340725806451613e-05,
"loss": 0.4622,
"step": 420
},
{
"epoch": 4.105072463768116,
"grad_norm": 0.7577747702598572,
"learning_rate": 1.2399193548387098e-05,
"loss": 0.2173,
"step": 430
},
{
"epoch": 4.1231884057971016,
"grad_norm": 0.33162328600883484,
"learning_rate": 1.1391129032258065e-05,
"loss": 0.2307,
"step": 440
},
{
"epoch": 4.141304347826087,
"grad_norm": 4.827192783355713,
"learning_rate": 1.0383064516129034e-05,
"loss": 0.3314,
"step": 450
},
{
"epoch": 4.159420289855072,
"grad_norm": 2.6278650760650635,
"learning_rate": 9.375000000000001e-06,
"loss": 0.3098,
"step": 460
},
{
"epoch": 4.168478260869565,
"eval_accuracy": 0.6830163246967201,
"eval_loss": 0.9756622910499573,
"eval_runtime": 1864.2768,
"eval_samples_per_second": 7.163,
"eval_steps_per_second": 1.791,
"step": 465
},
{
"epoch": 5.009057971014493,
"grad_norm": 12.96372127532959,
"learning_rate": 8.366935483870968e-06,
"loss": 0.4201,
"step": 470
},
{
"epoch": 5.0271739130434785,
"grad_norm": 17.302637100219727,
"learning_rate": 7.358870967741936e-06,
"loss": 0.3379,
"step": 480
},
{
"epoch": 5.045289855072464,
"grad_norm": 4.884481906890869,
"learning_rate": 6.350806451612904e-06,
"loss": 0.3286,
"step": 490
},
{
"epoch": 5.063405797101449,
"grad_norm": 18.058940887451172,
"learning_rate": 5.342741935483872e-06,
"loss": 0.258,
"step": 500
},
{
"epoch": 5.081521739130435,
"grad_norm": 1.203333854675293,
"learning_rate": 4.33467741935484e-06,
"loss": 0.1982,
"step": 510
},
{
"epoch": 5.09963768115942,
"grad_norm": 21.148109436035156,
"learning_rate": 3.3266129032258062e-06,
"loss": 0.2102,
"step": 520
},
{
"epoch": 5.117753623188406,
"grad_norm": 1.2070428133010864,
"learning_rate": 2.318548387096774e-06,
"loss": 0.2017,
"step": 530
},
{
"epoch": 5.135869565217392,
"grad_norm": 1.2308259010314941,
"learning_rate": 1.310483870967742e-06,
"loss": 0.1859,
"step": 540
},
{
"epoch": 5.153985507246377,
"grad_norm": 22.218280792236328,
"learning_rate": 3.024193548387097e-07,
"loss": 0.1016,
"step": 550
},
{
"epoch": 5.157608695652174,
"eval_accuracy": 0.758873745694174,
"eval_loss": 0.7178874611854553,
"eval_runtime": 1859.6371,
"eval_samples_per_second": 7.181,
"eval_steps_per_second": 1.796,
"step": 552
},
{
"epoch": 5.157608695652174,
"step": 552,
"total_flos": 1.9302137237930803e+18,
"train_loss": 0.5571505176848259,
"train_runtime": 11844.7659,
"train_samples_per_second": 0.186,
"train_steps_per_second": 0.047
},
{
"epoch": 5.157608695652174,
"eval_accuracy": 0.7891051507239497,
"eval_loss": 0.6989445090293884,
"eval_runtime": 1206.5933,
"eval_samples_per_second": 6.983,
"eval_steps_per_second": 1.746,
"step": 552
},
{
"epoch": 5.157608695652174,
"eval_accuracy": 0.78867879652753,
"eval_loss": 0.7002037763595581,
"eval_runtime": 1290.721,
"eval_samples_per_second": 6.515,
"eval_steps_per_second": 1.629,
"step": 552
}
],
"logging_steps": 10,
"max_steps": 552,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.9302137237930803e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}