{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 28.0,
"eval_steps": 500,
"global_step": 4900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05714285714285714,
"grad_norm": 17.375,
"learning_rate": 8e-05,
"loss": 2.6583,
"step": 10
},
{
"epoch": 0.11428571428571428,
"grad_norm": 10.1875,
"learning_rate": 8e-05,
"loss": 1.4692,
"step": 20
},
{
"epoch": 0.17142857142857143,
"grad_norm": 9.1875,
"learning_rate": 8e-05,
"loss": 1.3322,
"step": 30
},
{
"epoch": 0.22857142857142856,
"grad_norm": 9.0,
"learning_rate": 8e-05,
"loss": 1.2474,
"step": 40
},
{
"epoch": 0.2857142857142857,
"grad_norm": 8.5,
"learning_rate": 8e-05,
"loss": 1.2488,
"step": 50
},
{
"epoch": 0.34285714285714286,
"grad_norm": 22.75,
"learning_rate": 8e-05,
"loss": 1.0061,
"step": 60
},
{
"epoch": 0.4,
"grad_norm": 16.0,
"learning_rate": 8e-05,
"loss": 1.1315,
"step": 70
},
{
"epoch": 0.45714285714285713,
"grad_norm": 8.375,
"learning_rate": 8e-05,
"loss": 0.9975,
"step": 80
},
{
"epoch": 0.5142857142857142,
"grad_norm": 7.8125,
"learning_rate": 8e-05,
"loss": 1.2164,
"step": 90
},
{
"epoch": 0.5714285714285714,
"grad_norm": 15.0,
"learning_rate": 8e-05,
"loss": 1.1313,
"step": 100
},
{
"epoch": 0.6285714285714286,
"grad_norm": 8.1875,
"learning_rate": 8e-05,
"loss": 1.1283,
"step": 110
},
{
"epoch": 0.6857142857142857,
"grad_norm": 9.5625,
"learning_rate": 8e-05,
"loss": 1.1187,
"step": 120
},
{
"epoch": 0.7428571428571429,
"grad_norm": 15.5,
"learning_rate": 8e-05,
"loss": 0.997,
"step": 130
},
{
"epoch": 0.8,
"grad_norm": 18.75,
"learning_rate": 8e-05,
"loss": 0.8798,
"step": 140
},
{
"epoch": 0.8571428571428571,
"grad_norm": 7.15625,
"learning_rate": 8e-05,
"loss": 0.7601,
"step": 150
},
{
"epoch": 0.9142857142857143,
"grad_norm": 8.75,
"learning_rate": 8e-05,
"loss": 0.6739,
"step": 160
},
{
"epoch": 0.9714285714285714,
"grad_norm": 7.5625,
"learning_rate": 8e-05,
"loss": 0.9945,
"step": 170
},
{
"epoch": 1.0285714285714285,
"grad_norm": 7.25,
"learning_rate": 8e-05,
"loss": 0.7509,
"step": 180
},
{
"epoch": 1.0857142857142856,
"grad_norm": 7.4375,
"learning_rate": 8e-05,
"loss": 0.731,
"step": 190
},
{
"epoch": 1.1428571428571428,
"grad_norm": 7.625,
"learning_rate": 8e-05,
"loss": 0.8672,
"step": 200
},
{
"epoch": 1.2,
"grad_norm": 7.53125,
"learning_rate": 8e-05,
"loss": 0.7409,
"step": 210
},
{
"epoch": 1.2571428571428571,
"grad_norm": 8.375,
"learning_rate": 8e-05,
"loss": 0.7572,
"step": 220
},
{
"epoch": 1.3142857142857143,
"grad_norm": 7.53125,
"learning_rate": 8e-05,
"loss": 0.7667,
"step": 230
},
{
"epoch": 1.3714285714285714,
"grad_norm": 7.6875,
"learning_rate": 8e-05,
"loss": 0.7002,
"step": 240
},
{
"epoch": 1.4285714285714286,
"grad_norm": 8.4375,
"learning_rate": 8e-05,
"loss": 0.6992,
"step": 250
},
{
"epoch": 1.4857142857142858,
"grad_norm": 15.0,
"learning_rate": 8e-05,
"loss": 0.7765,
"step": 260
},
{
"epoch": 1.5428571428571427,
"grad_norm": 14.6875,
"learning_rate": 8e-05,
"loss": 0.5869,
"step": 270
},
{
"epoch": 1.6,
"grad_norm": 14.8125,
"learning_rate": 8e-05,
"loss": 0.8268,
"step": 280
},
{
"epoch": 1.657142857142857,
"grad_norm": 6.84375,
"learning_rate": 8e-05,
"loss": 0.7856,
"step": 290
},
{
"epoch": 1.7142857142857144,
"grad_norm": 14.75,
"learning_rate": 8e-05,
"loss": 0.6341,
"step": 300
},
{
"epoch": 1.7714285714285714,
"grad_norm": 7.21875,
"learning_rate": 8e-05,
"loss": 0.794,
"step": 310
},
{
"epoch": 1.8285714285714287,
"grad_norm": 14.0625,
"learning_rate": 8e-05,
"loss": 0.6861,
"step": 320
},
{
"epoch": 1.8857142857142857,
"grad_norm": 13.25,
"learning_rate": 8e-05,
"loss": 0.5252,
"step": 330
},
{
"epoch": 1.9428571428571428,
"grad_norm": 8.1875,
"learning_rate": 8e-05,
"loss": 0.7826,
"step": 340
},
{
"epoch": 2.0,
"grad_norm": 15.6875,
"learning_rate": 8e-05,
"loss": 0.7721,
"step": 350
},
{
"epoch": 2.057142857142857,
"grad_norm": 14.4375,
"learning_rate": 8e-05,
"loss": 0.6508,
"step": 360
},
{
"epoch": 2.1142857142857143,
"grad_norm": 14.625,
"learning_rate": 8e-05,
"loss": 0.6328,
"step": 370
},
{
"epoch": 2.1714285714285713,
"grad_norm": 14.375,
"learning_rate": 8e-05,
"loss": 0.5822,
"step": 380
},
{
"epoch": 2.2285714285714286,
"grad_norm": 7.46875,
"learning_rate": 8e-05,
"loss": 0.6849,
"step": 390
},
{
"epoch": 2.2857142857142856,
"grad_norm": 13.0625,
"learning_rate": 8e-05,
"loss": 0.5679,
"step": 400
},
{
"epoch": 2.342857142857143,
"grad_norm": 6.6875,
"learning_rate": 8e-05,
"loss": 0.552,
"step": 410
},
{
"epoch": 2.4,
"grad_norm": 10.5625,
"learning_rate": 8e-05,
"loss": 0.5635,
"step": 420
},
{
"epoch": 2.4571428571428573,
"grad_norm": 13.625,
"learning_rate": 8e-05,
"loss": 0.6423,
"step": 430
},
{
"epoch": 2.5142857142857142,
"grad_norm": 6.90625,
"learning_rate": 8e-05,
"loss": 0.6436,
"step": 440
},
{
"epoch": 2.571428571428571,
"grad_norm": 9.3125,
"learning_rate": 8e-05,
"loss": 0.5537,
"step": 450
},
{
"epoch": 2.6285714285714286,
"grad_norm": 16.0,
"learning_rate": 8e-05,
"loss": 0.5902,
"step": 460
},
{
"epoch": 2.685714285714286,
"grad_norm": 15.4375,
"learning_rate": 8e-05,
"loss": 0.6193,
"step": 470
},
{
"epoch": 2.742857142857143,
"grad_norm": 6.5,
"learning_rate": 8e-05,
"loss": 0.6679,
"step": 480
},
{
"epoch": 2.8,
"grad_norm": 9.375,
"learning_rate": 8e-05,
"loss": 0.5469,
"step": 490
},
{
"epoch": 2.857142857142857,
"grad_norm": 7.5,
"learning_rate": 8e-05,
"loss": 0.653,
"step": 500
},
{
"epoch": 2.857142857142857,
"eval_loss": 6.050149440765381,
"eval_runtime": 7.8659,
"eval_samples_per_second": 63.566,
"eval_steps_per_second": 63.566,
"step": 500
},
{
"epoch": 2.914285714285714,
"grad_norm": 7.15625,
"learning_rate": 8e-05,
"loss": 0.6111,
"step": 510
},
{
"epoch": 2.9714285714285715,
"grad_norm": 11.75,
"learning_rate": 8e-05,
"loss": 0.5121,
"step": 520
},
{
"epoch": 3.0285714285714285,
"grad_norm": 12.8125,
"learning_rate": 8e-05,
"loss": 0.5591,
"step": 530
},
{
"epoch": 3.085714285714286,
"grad_norm": 10.5,
"learning_rate": 8e-05,
"loss": 0.5514,
"step": 540
},
{
"epoch": 3.142857142857143,
"grad_norm": 13.375,
"learning_rate": 8e-05,
"loss": 0.4903,
"step": 550
},
{
"epoch": 3.2,
"grad_norm": 7.625,
"learning_rate": 8e-05,
"loss": 0.5356,
"step": 560
},
{
"epoch": 3.257142857142857,
"grad_norm": 13.3125,
"learning_rate": 8e-05,
"loss": 0.5641,
"step": 570
},
{
"epoch": 3.314285714285714,
"grad_norm": 6.875,
"learning_rate": 8e-05,
"loss": 0.5261,
"step": 580
},
{
"epoch": 3.3714285714285714,
"grad_norm": 13.1875,
"learning_rate": 8e-05,
"loss": 0.46,
"step": 590
},
{
"epoch": 3.4285714285714284,
"grad_norm": 13.5625,
"learning_rate": 8e-05,
"loss": 0.4915,
"step": 600
},
{
"epoch": 3.4857142857142858,
"grad_norm": 11.1875,
"learning_rate": 8e-05,
"loss": 0.5197,
"step": 610
},
{
"epoch": 3.5428571428571427,
"grad_norm": 14.8125,
"learning_rate": 8e-05,
"loss": 0.5629,
"step": 620
},
{
"epoch": 3.6,
"grad_norm": 7.46875,
"learning_rate": 8e-05,
"loss": 0.5783,
"step": 630
},
{
"epoch": 3.657142857142857,
"grad_norm": 6.59375,
"learning_rate": 8e-05,
"loss": 0.5087,
"step": 640
},
{
"epoch": 3.7142857142857144,
"grad_norm": 7.28125,
"learning_rate": 8e-05,
"loss": 0.4895,
"step": 650
},
{
"epoch": 3.7714285714285714,
"grad_norm": 6.6875,
"learning_rate": 8e-05,
"loss": 0.5505,
"step": 660
},
{
"epoch": 3.8285714285714287,
"grad_norm": 12.5625,
"learning_rate": 8e-05,
"loss": 0.5888,
"step": 670
},
{
"epoch": 3.8857142857142857,
"grad_norm": 13.375,
"learning_rate": 8e-05,
"loss": 0.5155,
"step": 680
},
{
"epoch": 3.942857142857143,
"grad_norm": 14.5,
"learning_rate": 8e-05,
"loss": 0.4999,
"step": 690
},
{
"epoch": 4.0,
"grad_norm": 6.8125,
"learning_rate": 8e-05,
"loss": 0.5957,
"step": 700
},
{
"epoch": 4.057142857142857,
"grad_norm": 6.625,
"learning_rate": 8e-05,
"loss": 0.377,
"step": 710
},
{
"epoch": 4.114285714285714,
"grad_norm": 6.6875,
"learning_rate": 8e-05,
"loss": 0.3669,
"step": 720
},
{
"epoch": 4.171428571428572,
"grad_norm": 13.0,
"learning_rate": 8e-05,
"loss": 0.4941,
"step": 730
},
{
"epoch": 4.228571428571429,
"grad_norm": 14.25,
"learning_rate": 8e-05,
"loss": 0.3737,
"step": 740
},
{
"epoch": 4.285714285714286,
"grad_norm": 7.65625,
"learning_rate": 8e-05,
"loss": 0.4366,
"step": 750
},
{
"epoch": 4.3428571428571425,
"grad_norm": 9.5625,
"learning_rate": 8e-05,
"loss": 0.4763,
"step": 760
},
{
"epoch": 4.4,
"grad_norm": 14.1875,
"learning_rate": 8e-05,
"loss": 0.4481,
"step": 770
},
{
"epoch": 4.457142857142857,
"grad_norm": 13.6875,
"learning_rate": 8e-05,
"loss": 0.4707,
"step": 780
},
{
"epoch": 4.514285714285714,
"grad_norm": 15.875,
"learning_rate": 8e-05,
"loss": 0.4706,
"step": 790
},
{
"epoch": 4.571428571428571,
"grad_norm": 5.78125,
"learning_rate": 8e-05,
"loss": 0.3984,
"step": 800
},
{
"epoch": 4.628571428571428,
"grad_norm": 7.34375,
"learning_rate": 8e-05,
"loss": 0.4837,
"step": 810
},
{
"epoch": 4.685714285714286,
"grad_norm": 13.8125,
"learning_rate": 8e-05,
"loss": 0.4525,
"step": 820
},
{
"epoch": 4.742857142857143,
"grad_norm": 13.75,
"learning_rate": 8e-05,
"loss": 0.4504,
"step": 830
},
{
"epoch": 4.8,
"grad_norm": 6.53125,
"learning_rate": 8e-05,
"loss": 0.4679,
"step": 840
},
{
"epoch": 4.857142857142857,
"grad_norm": 6.5,
"learning_rate": 8e-05,
"loss": 0.4356,
"step": 850
},
{
"epoch": 4.914285714285715,
"grad_norm": 13.75,
"learning_rate": 8e-05,
"loss": 0.4415,
"step": 860
},
{
"epoch": 4.9714285714285715,
"grad_norm": 12.25,
"learning_rate": 8e-05,
"loss": 0.4542,
"step": 870
},
{
"epoch": 5.0285714285714285,
"grad_norm": 7.0,
"learning_rate": 8e-05,
"loss": 0.4882,
"step": 880
},
{
"epoch": 5.085714285714285,
"grad_norm": 5.0,
"learning_rate": 8e-05,
"loss": 0.3197,
"step": 890
},
{
"epoch": 5.142857142857143,
"grad_norm": 13.375,
"learning_rate": 8e-05,
"loss": 0.3614,
"step": 900
},
{
"epoch": 5.2,
"grad_norm": 5.84375,
"learning_rate": 8e-05,
"loss": 0.3744,
"step": 910
},
{
"epoch": 5.257142857142857,
"grad_norm": 10.0,
"learning_rate": 8e-05,
"loss": 0.3811,
"step": 920
},
{
"epoch": 5.314285714285714,
"grad_norm": 12.0625,
"learning_rate": 8e-05,
"loss": 0.4349,
"step": 930
},
{
"epoch": 5.371428571428572,
"grad_norm": 5.5625,
"learning_rate": 8e-05,
"loss": 0.3253,
"step": 940
},
{
"epoch": 5.428571428571429,
"grad_norm": 5.46875,
"learning_rate": 8e-05,
"loss": 0.4326,
"step": 950
},
{
"epoch": 5.485714285714286,
"grad_norm": 4.8125,
"learning_rate": 8e-05,
"loss": 0.3973,
"step": 960
},
{
"epoch": 5.542857142857143,
"grad_norm": 6.65625,
"learning_rate": 8e-05,
"loss": 0.3823,
"step": 970
},
{
"epoch": 5.6,
"grad_norm": 13.0,
"learning_rate": 8e-05,
"loss": 0.4287,
"step": 980
},
{
"epoch": 5.6571428571428575,
"grad_norm": 12.6875,
"learning_rate": 8e-05,
"loss": 0.4264,
"step": 990
},
{
"epoch": 5.714285714285714,
"grad_norm": 6.46875,
"learning_rate": 8e-05,
"loss": 0.3398,
"step": 1000
},
{
"epoch": 5.714285714285714,
"eval_loss": 6.685291767120361,
"eval_runtime": 7.9381,
"eval_samples_per_second": 62.987,
"eval_steps_per_second": 62.987,
"step": 1000
},
{
"epoch": 5.771428571428571,
"grad_norm": 7.03125,
"learning_rate": 8e-05,
"loss": 0.3123,
"step": 1010
},
{
"epoch": 5.828571428571428,
"grad_norm": 6.59375,
"learning_rate": 8e-05,
"loss": 0.2876,
"step": 1020
},
{
"epoch": 5.885714285714286,
"grad_norm": 4.84375,
"learning_rate": 8e-05,
"loss": 0.3921,
"step": 1030
},
{
"epoch": 5.942857142857143,
"grad_norm": 12.0,
"learning_rate": 8e-05,
"loss": 0.3555,
"step": 1040
},
{
"epoch": 6.0,
"grad_norm": 6.4375,
"learning_rate": 8e-05,
"loss": 0.4566,
"step": 1050
},
{
"epoch": 6.057142857142857,
"grad_norm": 13.5,
"learning_rate": 8e-05,
"loss": 0.3109,
"step": 1060
},
{
"epoch": 6.114285714285714,
"grad_norm": 11.375,
"learning_rate": 8e-05,
"loss": 0.4008,
"step": 1070
},
{
"epoch": 6.171428571428572,
"grad_norm": 5.71875,
"learning_rate": 8e-05,
"loss": 0.3019,
"step": 1080
},
{
"epoch": 6.228571428571429,
"grad_norm": 3.671875,
"learning_rate": 8e-05,
"loss": 0.3714,
"step": 1090
},
{
"epoch": 6.285714285714286,
"grad_norm": 4.5,
"learning_rate": 8e-05,
"loss": 0.2272,
"step": 1100
},
{
"epoch": 6.3428571428571425,
"grad_norm": 13.9375,
"learning_rate": 8e-05,
"loss": 0.3953,
"step": 1110
},
{
"epoch": 6.4,
"grad_norm": 11.6875,
"learning_rate": 8e-05,
"loss": 0.3158,
"step": 1120
},
{
"epoch": 6.457142857142857,
"grad_norm": 13.125,
"learning_rate": 8e-05,
"loss": 0.3624,
"step": 1130
},
{
"epoch": 6.514285714285714,
"grad_norm": 6.5625,
"learning_rate": 8e-05,
"loss": 0.3308,
"step": 1140
},
{
"epoch": 6.571428571428571,
"grad_norm": 3.6875,
"learning_rate": 8e-05,
"loss": 0.2394,
"step": 1150
},
{
"epoch": 6.628571428571428,
"grad_norm": 3.1875,
"learning_rate": 8e-05,
"loss": 0.306,
"step": 1160
},
{
"epoch": 6.685714285714286,
"grad_norm": 9.75,
"learning_rate": 8e-05,
"loss": 0.3698,
"step": 1170
},
{
"epoch": 6.742857142857143,
"grad_norm": 3.53125,
"learning_rate": 8e-05,
"loss": 0.3485,
"step": 1180
},
{
"epoch": 6.8,
"grad_norm": 13.25,
"learning_rate": 8e-05,
"loss": 0.342,
"step": 1190
},
{
"epoch": 6.857142857142857,
"grad_norm": 28.5,
"learning_rate": 8e-05,
"loss": 0.3361,
"step": 1200
},
{
"epoch": 6.914285714285715,
"grad_norm": 7.84375,
"learning_rate": 8e-05,
"loss": 0.3526,
"step": 1210
},
{
"epoch": 6.9714285714285715,
"grad_norm": 8.0,
"learning_rate": 8e-05,
"loss": 0.3662,
"step": 1220
},
{
"epoch": 7.0285714285714285,
"grad_norm": 4.21875,
"learning_rate": 8e-05,
"loss": 0.3469,
"step": 1230
},
{
"epoch": 7.085714285714285,
"grad_norm": 7.84375,
"learning_rate": 8e-05,
"loss": 0.2773,
"step": 1240
},
{
"epoch": 7.142857142857143,
"grad_norm": 11.9375,
"learning_rate": 8e-05,
"loss": 0.2524,
"step": 1250
},
{
"epoch": 7.2,
"grad_norm": 13.1875,
"learning_rate": 8e-05,
"loss": 0.389,
"step": 1260
},
{
"epoch": 7.257142857142857,
"grad_norm": 12.4375,
"learning_rate": 8e-05,
"loss": 0.2766,
"step": 1270
},
{
"epoch": 7.314285714285714,
"grad_norm": 2.359375,
"learning_rate": 8e-05,
"loss": 0.3331,
"step": 1280
},
{
"epoch": 7.371428571428572,
"grad_norm": 11.4375,
"learning_rate": 8e-05,
"loss": 0.2712,
"step": 1290
},
{
"epoch": 7.428571428571429,
"grad_norm": 11.625,
"learning_rate": 8e-05,
"loss": 0.4544,
"step": 1300
},
{
"epoch": 7.485714285714286,
"grad_norm": 3.6875,
"learning_rate": 8e-05,
"loss": 0.3047,
"step": 1310
},
{
"epoch": 7.542857142857143,
"grad_norm": 3.609375,
"learning_rate": 8e-05,
"loss": 0.2868,
"step": 1320
},
{
"epoch": 7.6,
"grad_norm": 13.5,
"learning_rate": 8e-05,
"loss": 0.3294,
"step": 1330
},
{
"epoch": 7.6571428571428575,
"grad_norm": 2.78125,
"learning_rate": 8e-05,
"loss": 0.3252,
"step": 1340
},
{
"epoch": 7.714285714285714,
"grad_norm": 13.3125,
"learning_rate": 8e-05,
"loss": 0.2855,
"step": 1350
},
{
"epoch": 7.771428571428571,
"grad_norm": 3.71875,
"learning_rate": 8e-05,
"loss": 0.3158,
"step": 1360
},
{
"epoch": 7.828571428571428,
"grad_norm": 7.28125,
"learning_rate": 8e-05,
"loss": 0.2767,
"step": 1370
},
{
"epoch": 7.885714285714286,
"grad_norm": 2.53125,
"learning_rate": 8e-05,
"loss": 0.2423,
"step": 1380
},
{
"epoch": 7.942857142857143,
"grad_norm": 6.03125,
"learning_rate": 8e-05,
"loss": 0.2216,
"step": 1390
},
{
"epoch": 8.0,
"grad_norm": 4.96875,
"learning_rate": 8e-05,
"loss": 0.3818,
"step": 1400
},
{
"epoch": 8.057142857142857,
"grad_norm": 5.1875,
"learning_rate": 8e-05,
"loss": 0.2588,
"step": 1410
},
{
"epoch": 8.114285714285714,
"grad_norm": 12.3125,
"learning_rate": 8e-05,
"loss": 0.3463,
"step": 1420
},
{
"epoch": 8.17142857142857,
"grad_norm": 2.8125,
"learning_rate": 8e-05,
"loss": 0.2003,
"step": 1430
},
{
"epoch": 8.228571428571428,
"grad_norm": 12.6875,
"learning_rate": 8e-05,
"loss": 0.3551,
"step": 1440
},
{
"epoch": 8.285714285714286,
"grad_norm": 12.6875,
"learning_rate": 8e-05,
"loss": 0.3274,
"step": 1450
},
{
"epoch": 8.342857142857143,
"grad_norm": 1.3046875,
"learning_rate": 8e-05,
"loss": 0.2906,
"step": 1460
},
{
"epoch": 8.4,
"grad_norm": 1.9453125,
"learning_rate": 8e-05,
"loss": 0.3483,
"step": 1470
},
{
"epoch": 8.457142857142857,
"grad_norm": 12.125,
"learning_rate": 8e-05,
"loss": 0.2865,
"step": 1480
},
{
"epoch": 8.514285714285714,
"grad_norm": 13.4375,
"learning_rate": 8e-05,
"loss": 0.334,
"step": 1490
},
{
"epoch": 8.571428571428571,
"grad_norm": 5.125,
"learning_rate": 8e-05,
"loss": 0.2815,
"step": 1500
},
{
"epoch": 8.571428571428571,
"eval_loss": 7.375263690948486,
"eval_runtime": 7.8964,
"eval_samples_per_second": 63.32,
"eval_steps_per_second": 63.32,
"step": 1500
},
{
"epoch": 8.628571428571428,
"grad_norm": 13.3125,
"learning_rate": 8e-05,
"loss": 0.1534,
"step": 1510
},
{
"epoch": 8.685714285714285,
"grad_norm": 10.6875,
"learning_rate": 8e-05,
"loss": 0.2531,
"step": 1520
},
{
"epoch": 8.742857142857144,
"grad_norm": 5.28125,
"learning_rate": 8e-05,
"loss": 0.3664,
"step": 1530
},
{
"epoch": 8.8,
"grad_norm": 3.390625,
"learning_rate": 8e-05,
"loss": 0.2378,
"step": 1540
},
{
"epoch": 8.857142857142858,
"grad_norm": 13.3125,
"learning_rate": 8e-05,
"loss": 0.3167,
"step": 1550
},
{
"epoch": 8.914285714285715,
"grad_norm": 3.0,
"learning_rate": 8e-05,
"loss": 0.2882,
"step": 1560
},
{
"epoch": 8.971428571428572,
"grad_norm": 1.984375,
"learning_rate": 8e-05,
"loss": 0.2518,
"step": 1570
},
{
"epoch": 9.028571428571428,
"grad_norm": 9.5625,
"learning_rate": 8e-05,
"loss": 0.1575,
"step": 1580
},
{
"epoch": 9.085714285714285,
"grad_norm": 3.96875,
"learning_rate": 8e-05,
"loss": 0.1379,
"step": 1590
},
{
"epoch": 9.142857142857142,
"grad_norm": 10.4375,
"learning_rate": 8e-05,
"loss": 0.3056,
"step": 1600
},
{
"epoch": 9.2,
"grad_norm": 3.234375,
"learning_rate": 8e-05,
"loss": 0.2755,
"step": 1610
},
{
"epoch": 9.257142857142856,
"grad_norm": 1.078125,
"learning_rate": 8e-05,
"loss": 0.2795,
"step": 1620
},
{
"epoch": 9.314285714285715,
"grad_norm": 12.25,
"learning_rate": 8e-05,
"loss": 0.2939,
"step": 1630
},
{
"epoch": 9.371428571428572,
"grad_norm": 4.0,
"learning_rate": 8e-05,
"loss": 0.1852,
"step": 1640
},
{
"epoch": 9.428571428571429,
"grad_norm": 9.1875,
"learning_rate": 8e-05,
"loss": 0.2803,
"step": 1650
},
{
"epoch": 9.485714285714286,
"grad_norm": 13.625,
"learning_rate": 8e-05,
"loss": 0.2908,
"step": 1660
},
{
"epoch": 9.542857142857143,
"grad_norm": 13.4375,
"learning_rate": 8e-05,
"loss": 0.3115,
"step": 1670
},
{
"epoch": 9.6,
"grad_norm": 3.796875,
"learning_rate": 8e-05,
"loss": 0.4095,
"step": 1680
},
{
"epoch": 9.657142857142857,
"grad_norm": 2.484375,
"learning_rate": 8e-05,
"loss": 0.2278,
"step": 1690
},
{
"epoch": 9.714285714285714,
"grad_norm": 11.75,
"learning_rate": 8e-05,
"loss": 0.3629,
"step": 1700
},
{
"epoch": 9.771428571428572,
"grad_norm": 2.8125,
"learning_rate": 8e-05,
"loss": 0.2527,
"step": 1710
},
{
"epoch": 9.82857142857143,
"grad_norm": 12.75,
"learning_rate": 8e-05,
"loss": 0.2514,
"step": 1720
},
{
"epoch": 9.885714285714286,
"grad_norm": 6.90625,
"learning_rate": 8e-05,
"loss": 0.238,
"step": 1730
},
{
"epoch": 9.942857142857143,
"grad_norm": 12.375,
"learning_rate": 8e-05,
"loss": 0.2673,
"step": 1740
},
{
"epoch": 10.0,
"grad_norm": 11.5,
"learning_rate": 8e-05,
"loss": 0.3351,
"step": 1750
},
{
"epoch": 10.057142857142857,
"grad_norm": 13.0625,
"learning_rate": 8e-05,
"loss": 0.255,
"step": 1760
},
{
"epoch": 10.114285714285714,
"grad_norm": 3.171875,
"learning_rate": 8e-05,
"loss": 0.2478,
"step": 1770
},
{
"epoch": 10.17142857142857,
"grad_norm": 1.25,
"learning_rate": 8e-05,
"loss": 0.1548,
"step": 1780
},
{
"epoch": 10.228571428571428,
"grad_norm": 3.078125,
"learning_rate": 8e-05,
"loss": 0.3276,
"step": 1790
},
{
"epoch": 10.285714285714286,
"grad_norm": 2.21875,
"learning_rate": 8e-05,
"loss": 0.2305,
"step": 1800
},
{
"epoch": 10.342857142857143,
"grad_norm": 11.3125,
"learning_rate": 8e-05,
"loss": 0.2625,
"step": 1810
},
{
"epoch": 10.4,
"grad_norm": 10.375,
"learning_rate": 8e-05,
"loss": 0.2509,
"step": 1820
},
{
"epoch": 10.457142857142857,
"grad_norm": 2.25,
"learning_rate": 8e-05,
"loss": 0.2091,
"step": 1830
},
{
"epoch": 10.514285714285714,
"grad_norm": 3.5,
"learning_rate": 8e-05,
"loss": 0.2416,
"step": 1840
},
{
"epoch": 10.571428571428571,
"grad_norm": 12.1875,
"learning_rate": 8e-05,
"loss": 0.3821,
"step": 1850
},
{
"epoch": 10.628571428571428,
"grad_norm": 11.0625,
"learning_rate": 8e-05,
"loss": 0.3066,
"step": 1860
},
{
"epoch": 10.685714285714285,
"grad_norm": 1.078125,
"learning_rate": 8e-05,
"loss": 0.1716,
"step": 1870
},
{
"epoch": 10.742857142857144,
"grad_norm": 12.6875,
"learning_rate": 8e-05,
"loss": 0.325,
"step": 1880
},
{
"epoch": 10.8,
"grad_norm": 2.046875,
"learning_rate": 8e-05,
"loss": 0.2183,
"step": 1890
},
{
"epoch": 10.857142857142858,
"grad_norm": 3.90625,
"learning_rate": 8e-05,
"loss": 0.2748,
"step": 1900
},
{
"epoch": 10.914285714285715,
"grad_norm": 11.5,
"learning_rate": 8e-05,
"loss": 0.2748,
"step": 1910
},
{
"epoch": 10.971428571428572,
"grad_norm": 2.765625,
"learning_rate": 8e-05,
"loss": 0.2093,
"step": 1920
},
{
"epoch": 11.028571428571428,
"grad_norm": 2.78125,
"learning_rate": 8e-05,
"loss": 0.3204,
"step": 1930
},
{
"epoch": 11.085714285714285,
"grad_norm": 12.375,
"learning_rate": 8e-05,
"loss": 0.2653,
"step": 1940
},
{
"epoch": 11.142857142857142,
"grad_norm": 1.4453125,
"learning_rate": 8e-05,
"loss": 0.1379,
"step": 1950
},
{
"epoch": 11.2,
"grad_norm": 12.0,
"learning_rate": 8e-05,
"loss": 0.3495,
"step": 1960
},
{
"epoch": 11.257142857142856,
"grad_norm": 0.96875,
"learning_rate": 8e-05,
"loss": 0.0931,
"step": 1970
},
{
"epoch": 11.314285714285715,
"grad_norm": 2.1875,
"learning_rate": 8e-05,
"loss": 0.3159,
"step": 1980
},
{
"epoch": 11.371428571428572,
"grad_norm": 1.7734375,
"learning_rate": 8e-05,
"loss": 0.2834,
"step": 1990
},
{
"epoch": 11.428571428571429,
"grad_norm": 2.46875,
"learning_rate": 8e-05,
"loss": 0.298,
"step": 2000
},
{
"epoch": 11.428571428571429,
"eval_loss": 7.439001083374023,
"eval_runtime": 7.8289,
"eval_samples_per_second": 63.866,
"eval_steps_per_second": 63.866,
"step": 2000
},
{
"epoch": 11.485714285714286,
"grad_norm": 2.578125,
"learning_rate": 8e-05,
"loss": 0.2116,
"step": 2010
},
{
"epoch": 11.542857142857143,
"grad_norm": 13.0,
"learning_rate": 8e-05,
"loss": 0.3693,
"step": 2020
},
{
"epoch": 11.6,
"grad_norm": 1.40625,
"learning_rate": 8e-05,
"loss": 0.1701,
"step": 2030
},
{
"epoch": 11.657142857142857,
"grad_norm": 13.25,
"learning_rate": 8e-05,
"loss": 0.2949,
"step": 2040
},
{
"epoch": 11.714285714285714,
"grad_norm": 1.5546875,
"learning_rate": 8e-05,
"loss": 0.207,
"step": 2050
},
{
"epoch": 11.771428571428572,
"grad_norm": 13.5,
"learning_rate": 8e-05,
"loss": 0.372,
"step": 2060
},
{
"epoch": 11.82857142857143,
"grad_norm": 2.296875,
"learning_rate": 8e-05,
"loss": 0.3137,
"step": 2070
},
{
"epoch": 11.885714285714286,
"grad_norm": 13.3125,
"learning_rate": 8e-05,
"loss": 0.3433,
"step": 2080
},
{
"epoch": 11.942857142857143,
"grad_norm": 1.875,
"learning_rate": 8e-05,
"loss": 0.2197,
"step": 2090
},
{
"epoch": 12.0,
"grad_norm": 14.3125,
"learning_rate": 8e-05,
"loss": 0.2616,
"step": 2100
},
{
"epoch": 12.057142857142857,
"grad_norm": 13.9375,
"learning_rate": 8e-05,
"loss": 0.3095,
"step": 2110
},
{
"epoch": 12.114285714285714,
"grad_norm": 11.75,
"learning_rate": 8e-05,
"loss": 0.2683,
"step": 2120
},
{
"epoch": 12.17142857142857,
"grad_norm": 3.015625,
"learning_rate": 8e-05,
"loss": 0.2083,
"step": 2130
},
{
"epoch": 12.228571428571428,
"grad_norm": 1.6015625,
"learning_rate": 8e-05,
"loss": 0.17,
"step": 2140
},
{
"epoch": 12.285714285714286,
"grad_norm": 2.875,
"learning_rate": 8e-05,
"loss": 0.1634,
"step": 2150
},
{
"epoch": 12.342857142857143,
"grad_norm": 1.5,
"learning_rate": 8e-05,
"loss": 0.3009,
"step": 2160
},
{
"epoch": 12.4,
"grad_norm": 12.75,
"learning_rate": 8e-05,
"loss": 0.1983,
"step": 2170
},
{
"epoch": 12.457142857142857,
"grad_norm": 1.765625,
"learning_rate": 8e-05,
"loss": 0.244,
"step": 2180
},
{
"epoch": 12.514285714285714,
"grad_norm": 13.1875,
"learning_rate": 8e-05,
"loss": 0.3322,
"step": 2190
},
{
"epoch": 12.571428571428571,
"grad_norm": 13.75,
"learning_rate": 8e-05,
"loss": 0.2492,
"step": 2200
},
{
"epoch": 12.628571428571428,
"grad_norm": 1.625,
"learning_rate": 8e-05,
"loss": 0.2741,
"step": 2210
},
{
"epoch": 12.685714285714285,
"grad_norm": 3.21875,
"learning_rate": 8e-05,
"loss": 0.2587,
"step": 2220
},
{
"epoch": 12.742857142857144,
"grad_norm": 2.515625,
"learning_rate": 8e-05,
"loss": 0.3302,
"step": 2230
},
{
"epoch": 12.8,
"grad_norm": 13.25,
"learning_rate": 8e-05,
"loss": 0.2935,
"step": 2240
},
{
"epoch": 12.857142857142858,
"grad_norm": 1.265625,
"learning_rate": 8e-05,
"loss": 0.1977,
"step": 2250
},
{
"epoch": 12.914285714285715,
"grad_norm": 11.9375,
"learning_rate": 8e-05,
"loss": 0.3059,
"step": 2260
},
{
"epoch": 12.971428571428572,
"grad_norm": 11.625,
"learning_rate": 8e-05,
"loss": 0.2862,
"step": 2270
},
{
"epoch": 13.028571428571428,
"grad_norm": 2.546875,
"learning_rate": 8e-05,
"loss": 0.2819,
"step": 2280
},
{
"epoch": 13.085714285714285,
"grad_norm": 12.875,
"learning_rate": 8e-05,
"loss": 0.2451,
"step": 2290
},
{
"epoch": 13.142857142857142,
"grad_norm": 1.3125,
"learning_rate": 8e-05,
"loss": 0.1908,
"step": 2300
},
{
"epoch": 13.2,
"grad_norm": 10.125,
"learning_rate": 8e-05,
"loss": 0.1633,
"step": 2310
},
{
"epoch": 13.257142857142856,
"grad_norm": 2.890625,
"learning_rate": 8e-05,
"loss": 0.2328,
"step": 2320
},
{
"epoch": 13.314285714285715,
"grad_norm": 1.8203125,
"learning_rate": 8e-05,
"loss": 0.2568,
"step": 2330
},
{
"epoch": 13.371428571428572,
"grad_norm": 2.0,
"learning_rate": 8e-05,
"loss": 0.2639,
"step": 2340
},
{
"epoch": 13.428571428571429,
"grad_norm": 12.1875,
"learning_rate": 8e-05,
"loss": 0.174,
"step": 2350
},
{
"epoch": 13.485714285714286,
"grad_norm": 12.875,
"learning_rate": 8e-05,
"loss": 0.2579,
"step": 2360
},
{
"epoch": 13.542857142857143,
"grad_norm": 12.0625,
"learning_rate": 8e-05,
"loss": 0.2111,
"step": 2370
},
{
"epoch": 13.6,
"grad_norm": 1.7265625,
"learning_rate": 8e-05,
"loss": 0.3664,
"step": 2380
},
{
"epoch": 13.657142857142857,
"grad_norm": 2.96875,
"learning_rate": 8e-05,
"loss": 0.2923,
"step": 2390
},
{
"epoch": 13.714285714285714,
"grad_norm": 1.4296875,
"learning_rate": 8e-05,
"loss": 0.206,
"step": 2400
},
{
"epoch": 13.771428571428572,
"grad_norm": 11.25,
"learning_rate": 8e-05,
"loss": 0.3807,
"step": 2410
},
{
"epoch": 13.82857142857143,
"grad_norm": 1.6328125,
"learning_rate": 8e-05,
"loss": 0.2344,
"step": 2420
},
{
"epoch": 13.885714285714286,
"grad_norm": 12.3125,
"learning_rate": 8e-05,
"loss": 0.2925,
"step": 2430
},
{
"epoch": 13.942857142857143,
"grad_norm": 2.0,
"learning_rate": 8e-05,
"loss": 0.1925,
"step": 2440
},
{
"epoch": 14.0,
"grad_norm": 11.875,
"learning_rate": 8e-05,
"loss": 0.3541,
"step": 2450
},
{
"epoch": 14.057142857142857,
"grad_norm": 12.1875,
"learning_rate": 8e-05,
"loss": 0.2747,
"step": 2460
},
{
"epoch": 14.114285714285714,
"grad_norm": 1.4765625,
"learning_rate": 8e-05,
"loss": 0.1139,
"step": 2470
},
{
"epoch": 14.17142857142857,
"grad_norm": 3.171875,
"learning_rate": 8e-05,
"loss": 0.2117,
"step": 2480
},
{
"epoch": 14.228571428571428,
"grad_norm": 2.671875,
"learning_rate": 8e-05,
"loss": 0.2845,
"step": 2490
},
{
"epoch": 14.285714285714286,
"grad_norm": 12.0,
"learning_rate": 8e-05,
"loss": 0.2977,
"step": 2500
},
{
"epoch": 14.285714285714286,
"eval_loss": 8.100706100463867,
"eval_runtime": 7.5531,
"eval_samples_per_second": 66.198,
"eval_steps_per_second": 66.198,
"step": 2500
},
{
"epoch": 14.342857142857143,
"grad_norm": 1.640625,
"learning_rate": 8e-05,
"loss": 0.2609,
"step": 2510
},
{
"epoch": 14.4,
"grad_norm": 2.046875,
"learning_rate": 8e-05,
"loss": 0.2843,
"step": 2520
},
{
"epoch": 14.457142857142857,
"grad_norm": 12.125,
"learning_rate": 8e-05,
"loss": 0.2175,
"step": 2530
},
{
"epoch": 14.514285714285714,
"grad_norm": 13.0625,
"learning_rate": 8e-05,
"loss": 0.2517,
"step": 2540
},
{
"epoch": 14.571428571428571,
"grad_norm": 11.875,
"learning_rate": 8e-05,
"loss": 0.4215,
"step": 2550
},
{
"epoch": 14.628571428571428,
"grad_norm": 2.921875,
"learning_rate": 8e-05,
"loss": 0.2922,
"step": 2560
},
{
"epoch": 14.685714285714285,
"grad_norm": 11.75,
"learning_rate": 8e-05,
"loss": 0.1465,
"step": 2570
},
{
"epoch": 14.742857142857144,
"grad_norm": 13.25,
"learning_rate": 8e-05,
"loss": 0.1883,
"step": 2580
},
{
"epoch": 14.8,
"grad_norm": 1.703125,
"learning_rate": 8e-05,
"loss": 0.2466,
"step": 2590
},
{
"epoch": 14.857142857142858,
"grad_norm": 13.0,
"learning_rate": 8e-05,
"loss": 0.3114,
"step": 2600
},
{
"epoch": 14.914285714285715,
"grad_norm": 1.2421875,
"learning_rate": 8e-05,
"loss": 0.3011,
"step": 2610
},
{
"epoch": 14.971428571428572,
"grad_norm": 2.46875,
"learning_rate": 8e-05,
"loss": 0.1617,
"step": 2620
},
{
"epoch": 15.028571428571428,
"grad_norm": 2.125,
"learning_rate": 8e-05,
"loss": 0.2399,
"step": 2630
},
{
"epoch": 15.085714285714285,
"grad_norm": 12.875,
"learning_rate": 8e-05,
"loss": 0.3677,
"step": 2640
},
{
"epoch": 15.142857142857142,
"grad_norm": 3.09375,
"learning_rate": 8e-05,
"loss": 0.1981,
"step": 2650
},
{
"epoch": 15.2,
"grad_norm": 1.9375,
"learning_rate": 8e-05,
"loss": 0.1607,
"step": 2660
},
{
"epoch": 15.257142857142856,
"grad_norm": 14.1875,
"learning_rate": 8e-05,
"loss": 0.2697,
"step": 2670
},
{
"epoch": 15.314285714285715,
"grad_norm": 54.5,
"learning_rate": 8e-05,
"loss": 0.2507,
"step": 2680
},
{
"epoch": 15.371428571428572,
"grad_norm": 9.625,
"learning_rate": 8e-05,
"loss": 0.2347,
"step": 2690
},
{
"epoch": 15.428571428571429,
"grad_norm": 1.921875,
"learning_rate": 8e-05,
"loss": 0.176,
"step": 2700
},
{
"epoch": 15.485714285714286,
"grad_norm": 12.9375,
"learning_rate": 8e-05,
"loss": 0.2868,
"step": 2710
},
{
"epoch": 15.542857142857143,
"grad_norm": 1.0859375,
"learning_rate": 8e-05,
"loss": 0.2933,
"step": 2720
},
{
"epoch": 15.6,
"grad_norm": 1.328125,
"learning_rate": 8e-05,
"loss": 0.1255,
"step": 2730
},
{
"epoch": 15.657142857142857,
"grad_norm": 0.86328125,
"learning_rate": 8e-05,
"loss": 0.0946,
"step": 2740
},
{
"epoch": 15.714285714285714,
"grad_norm": 0.87890625,
"learning_rate": 8e-05,
"loss": 0.215,
"step": 2750
},
{
"epoch": 15.771428571428572,
"grad_norm": 12.1875,
"learning_rate": 8e-05,
"loss": 0.3107,
"step": 2760
},
{
"epoch": 15.82857142857143,
"grad_norm": 1.8671875,
"learning_rate": 8e-05,
"loss": 0.3236,
"step": 2770
},
{
"epoch": 15.885714285714286,
"grad_norm": 1.0703125,
"learning_rate": 8e-05,
"loss": 0.3173,
"step": 2780
},
{
"epoch": 15.942857142857143,
"grad_norm": 12.3125,
"learning_rate": 8e-05,
"loss": 0.3143,
"step": 2790
},
{
"epoch": 16.0,
"grad_norm": 1.7890625,
"learning_rate": 8e-05,
"loss": 0.2744,
"step": 2800
},
{
"epoch": 16.057142857142857,
"grad_norm": 1.4765625,
"learning_rate": 8e-05,
"loss": 0.2547,
"step": 2810
},
{
"epoch": 16.114285714285714,
"grad_norm": 12.5,
"learning_rate": 8e-05,
"loss": 0.2173,
"step": 2820
},
{
"epoch": 16.17142857142857,
"grad_norm": 0.796875,
"learning_rate": 8e-05,
"loss": 0.1791,
"step": 2830
},
{
"epoch": 16.228571428571428,
"grad_norm": 2.46875,
"learning_rate": 8e-05,
"loss": 0.277,
"step": 2840
},
{
"epoch": 16.285714285714285,
"grad_norm": 12.625,
"learning_rate": 8e-05,
"loss": 0.2995,
"step": 2850
},
{
"epoch": 16.34285714285714,
"grad_norm": 13.0,
"learning_rate": 8e-05,
"loss": 0.1871,
"step": 2860
},
{
"epoch": 16.4,
"grad_norm": 12.5,
"learning_rate": 8e-05,
"loss": 0.299,
"step": 2870
},
{
"epoch": 16.457142857142856,
"grad_norm": 1.375,
"learning_rate": 8e-05,
"loss": 0.2655,
"step": 2880
},
{
"epoch": 16.514285714285712,
"grad_norm": 1.0390625,
"learning_rate": 8e-05,
"loss": 0.17,
"step": 2890
},
{
"epoch": 16.571428571428573,
"grad_norm": 2.578125,
"learning_rate": 8e-05,
"loss": 0.2232,
"step": 2900
},
{
"epoch": 16.62857142857143,
"grad_norm": 10.4375,
"learning_rate": 8e-05,
"loss": 0.2826,
"step": 2910
},
{
"epoch": 16.685714285714287,
"grad_norm": 11.8125,
"learning_rate": 8e-05,
"loss": 0.3659,
"step": 2920
},
{
"epoch": 16.742857142857144,
"grad_norm": 1.71875,
"learning_rate": 8e-05,
"loss": 0.26,
"step": 2930
},
{
"epoch": 16.8,
"grad_norm": 13.125,
"learning_rate": 8e-05,
"loss": 0.2699,
"step": 2940
},
{
"epoch": 16.857142857142858,
"grad_norm": 1.515625,
"learning_rate": 8e-05,
"loss": 0.238,
"step": 2950
},
{
"epoch": 16.914285714285715,
"grad_norm": 3.390625,
"learning_rate": 8e-05,
"loss": 0.2692,
"step": 2960
},
{
"epoch": 16.97142857142857,
"grad_norm": 1.5625,
"learning_rate": 8e-05,
"loss": 0.1514,
"step": 2970
},
{
"epoch": 17.02857142857143,
"grad_norm": 1.140625,
"learning_rate": 8e-05,
"loss": 0.1644,
"step": 2980
},
{
"epoch": 17.085714285714285,
"grad_norm": 12.5625,
"learning_rate": 8e-05,
"loss": 0.3619,
"step": 2990
},
{
"epoch": 17.142857142857142,
"grad_norm": 1.1328125,
"learning_rate": 8e-05,
"loss": 0.1402,
"step": 3000
},
{
"epoch": 17.142857142857142,
"eval_loss": 8.192225456237793,
"eval_runtime": 7.8572,
"eval_samples_per_second": 63.636,
"eval_steps_per_second": 63.636,
"step": 3000
},
{
"epoch": 17.2,
"grad_norm": 1.1328125,
"learning_rate": 8e-05,
"loss": 0.279,
"step": 3010
},
{
"epoch": 17.257142857142856,
"grad_norm": 12.5,
"learning_rate": 8e-05,
"loss": 0.394,
"step": 3020
},
{
"epoch": 17.314285714285713,
"grad_norm": 12.0,
"learning_rate": 8e-05,
"loss": 0.2386,
"step": 3030
},
{
"epoch": 17.37142857142857,
"grad_norm": 11.3125,
"learning_rate": 8e-05,
"loss": 0.3098,
"step": 3040
},
{
"epoch": 17.428571428571427,
"grad_norm": 10.875,
"learning_rate": 8e-05,
"loss": 0.1654,
"step": 3050
},
{
"epoch": 17.485714285714284,
"grad_norm": 2.453125,
"learning_rate": 8e-05,
"loss": 0.2084,
"step": 3060
},
{
"epoch": 17.542857142857144,
"grad_norm": 13.3125,
"learning_rate": 8e-05,
"loss": 0.2444,
"step": 3070
},
{
"epoch": 17.6,
"grad_norm": 1.328125,
"learning_rate": 8e-05,
"loss": 0.2768,
"step": 3080
},
{
"epoch": 17.65714285714286,
"grad_norm": 1.6328125,
"learning_rate": 8e-05,
"loss": 0.2225,
"step": 3090
},
{
"epoch": 17.714285714285715,
"grad_norm": 0.8984375,
"learning_rate": 8e-05,
"loss": 0.2237,
"step": 3100
},
{
"epoch": 17.771428571428572,
"grad_norm": 12.0625,
"learning_rate": 8e-05,
"loss": 0.231,
"step": 3110
},
{
"epoch": 17.82857142857143,
"grad_norm": 12.125,
"learning_rate": 8e-05,
"loss": 0.1867,
"step": 3120
},
{
"epoch": 17.885714285714286,
"grad_norm": 0.96875,
"learning_rate": 8e-05,
"loss": 0.2167,
"step": 3130
},
{
"epoch": 17.942857142857143,
"grad_norm": 1.5,
"learning_rate": 8e-05,
"loss": 0.1154,
"step": 3140
},
{
"epoch": 18.0,
"grad_norm": 11.375,
"learning_rate": 8e-05,
"loss": 0.3082,
"step": 3150
},
{
"epoch": 18.057142857142857,
"grad_norm": 12.0,
"learning_rate": 8e-05,
"loss": 0.283,
"step": 3160
},
{
"epoch": 18.114285714285714,
"grad_norm": 12.5,
"learning_rate": 8e-05,
"loss": 0.2062,
"step": 3170
},
{
"epoch": 18.17142857142857,
"grad_norm": 11.0625,
"learning_rate": 8e-05,
"loss": 0.3119,
"step": 3180
},
{
"epoch": 18.228571428571428,
"grad_norm": 1.6015625,
"learning_rate": 8e-05,
"loss": 0.2351,
"step": 3190
},
{
"epoch": 18.285714285714285,
"grad_norm": 12.4375,
"learning_rate": 8e-05,
"loss": 0.2614,
"step": 3200
},
{
"epoch": 18.34285714285714,
"grad_norm": 13.1875,
"learning_rate": 8e-05,
"loss": 0.2687,
"step": 3210
},
{
"epoch": 18.4,
"grad_norm": 13.375,
"learning_rate": 8e-05,
"loss": 0.3616,
"step": 3220
},
{
"epoch": 18.457142857142856,
"grad_norm": 2.203125,
"learning_rate": 8e-05,
"loss": 0.2505,
"step": 3230
},
{
"epoch": 18.514285714285712,
"grad_norm": 1.7734375,
"learning_rate": 8e-05,
"loss": 0.18,
"step": 3240
},
{
"epoch": 18.571428571428573,
"grad_norm": 1.421875,
"learning_rate": 8e-05,
"loss": 0.2074,
"step": 3250
},
{
"epoch": 18.62857142857143,
"grad_norm": 11.875,
"learning_rate": 8e-05,
"loss": 0.2854,
"step": 3260
},
{
"epoch": 18.685714285714287,
"grad_norm": 2.34375,
"learning_rate": 8e-05,
"loss": 0.1504,
"step": 3270
},
{
"epoch": 18.742857142857144,
"grad_norm": 1.5625,
"learning_rate": 8e-05,
"loss": 0.2151,
"step": 3280
},
{
"epoch": 18.8,
"grad_norm": 12.4375,
"learning_rate": 8e-05,
"loss": 0.2479,
"step": 3290
},
{
"epoch": 18.857142857142858,
"grad_norm": 1.65625,
"learning_rate": 8e-05,
"loss": 0.2759,
"step": 3300
},
{
"epoch": 18.914285714285715,
"grad_norm": 1.78125,
"learning_rate": 8e-05,
"loss": 0.3888,
"step": 3310
},
{
"epoch": 18.97142857142857,
"grad_norm": 1.46875,
"learning_rate": 8e-05,
"loss": 0.1576,
"step": 3320
},
{
"epoch": 19.02857142857143,
"grad_norm": 11.375,
"learning_rate": 8e-05,
"loss": 0.2192,
"step": 3330
},
{
"epoch": 19.085714285714285,
"grad_norm": 14.5625,
"learning_rate": 8e-05,
"loss": 0.2596,
"step": 3340
},
{
"epoch": 19.142857142857142,
"grad_norm": 0.8828125,
"learning_rate": 8e-05,
"loss": 0.1673,
"step": 3350
},
{
"epoch": 19.2,
"grad_norm": 13.25,
"learning_rate": 8e-05,
"loss": 0.3031,
"step": 3360
},
{
"epoch": 19.257142857142856,
"grad_norm": 11.6875,
"learning_rate": 8e-05,
"loss": 0.3253,
"step": 3370
},
{
"epoch": 19.314285714285713,
"grad_norm": 12.8125,
"learning_rate": 8e-05,
"loss": 0.257,
"step": 3380
},
{
"epoch": 19.37142857142857,
"grad_norm": 10.1875,
"learning_rate": 8e-05,
"loss": 0.2625,
"step": 3390
},
{
"epoch": 19.428571428571427,
"grad_norm": 11.75,
"learning_rate": 8e-05,
"loss": 0.3655,
"step": 3400
},
{
"epoch": 19.485714285714284,
"grad_norm": 2.546875,
"learning_rate": 8e-05,
"loss": 0.1244,
"step": 3410
},
{
"epoch": 19.542857142857144,
"grad_norm": 1.3984375,
"learning_rate": 8e-05,
"loss": 0.1875,
"step": 3420
},
{
"epoch": 19.6,
"grad_norm": 0.83203125,
"learning_rate": 8e-05,
"loss": 0.1918,
"step": 3430
},
{
"epoch": 19.65714285714286,
"grad_norm": 12.6875,
"learning_rate": 8e-05,
"loss": 0.2807,
"step": 3440
},
{
"epoch": 19.714285714285715,
"grad_norm": 1.5078125,
"learning_rate": 8e-05,
"loss": 0.1853,
"step": 3450
},
{
"epoch": 19.771428571428572,
"grad_norm": 1.359375,
"learning_rate": 8e-05,
"loss": 0.1957,
"step": 3460
},
{
"epoch": 19.82857142857143,
"grad_norm": 2.15625,
"learning_rate": 8e-05,
"loss": 0.2659,
"step": 3470
},
{
"epoch": 19.885714285714286,
"grad_norm": 11.3125,
"learning_rate": 8e-05,
"loss": 0.2069,
"step": 3480
},
{
"epoch": 19.942857142857143,
"grad_norm": 0.90625,
"learning_rate": 8e-05,
"loss": 0.2495,
"step": 3490
},
{
"epoch": 20.0,
"grad_norm": 1.6875,
"learning_rate": 8e-05,
"loss": 0.2491,
"step": 3500
},
{
"epoch": 20.0,
"eval_loss": 8.880386352539062,
"eval_runtime": 7.8925,
"eval_samples_per_second": 63.351,
"eval_steps_per_second": 63.351,
"step": 3500
},
{
"epoch": 20.057142857142857,
"grad_norm": 1.0234375,
"learning_rate": 8e-05,
"loss": 0.2605,
"step": 3510
},
{
"epoch": 20.114285714285714,
"grad_norm": 13.6875,
"learning_rate": 8e-05,
"loss": 0.3452,
"step": 3520
},
{
"epoch": 20.17142857142857,
"grad_norm": 1.609375,
"learning_rate": 8e-05,
"loss": 0.2802,
"step": 3530
},
{
"epoch": 20.228571428571428,
"grad_norm": 1.125,
"learning_rate": 8e-05,
"loss": 0.2273,
"step": 3540
},
{
"epoch": 20.285714285714285,
"grad_norm": 2.921875,
"learning_rate": 8e-05,
"loss": 0.1685,
"step": 3550
},
{
"epoch": 20.34285714285714,
"grad_norm": 2.828125,
"learning_rate": 8e-05,
"loss": 0.3155,
"step": 3560
},
{
"epoch": 20.4,
"grad_norm": 13.125,
"learning_rate": 8e-05,
"loss": 0.3041,
"step": 3570
},
{
"epoch": 20.457142857142856,
"grad_norm": 13.0,
"learning_rate": 8e-05,
"loss": 0.2268,
"step": 3580
},
{
"epoch": 20.514285714285712,
"grad_norm": 3.46875,
"learning_rate": 8e-05,
"loss": 0.2387,
"step": 3590
},
{
"epoch": 20.571428571428573,
"grad_norm": 12.25,
"learning_rate": 8e-05,
"loss": 0.2591,
"step": 3600
},
{
"epoch": 20.62857142857143,
"grad_norm": 13.8125,
"learning_rate": 8e-05,
"loss": 0.2208,
"step": 3610
},
{
"epoch": 20.685714285714287,
"grad_norm": 1.921875,
"learning_rate": 8e-05,
"loss": 0.1838,
"step": 3620
},
{
"epoch": 20.742857142857144,
"grad_norm": 2.734375,
"learning_rate": 8e-05,
"loss": 0.2709,
"step": 3630
},
{
"epoch": 20.8,
"grad_norm": 13.9375,
"learning_rate": 8e-05,
"loss": 0.1863,
"step": 3640
},
{
"epoch": 20.857142857142858,
"grad_norm": 0.921875,
"learning_rate": 8e-05,
"loss": 0.1883,
"step": 3650
},
{
"epoch": 20.914285714285715,
"grad_norm": 1.140625,
"learning_rate": 8e-05,
"loss": 0.2428,
"step": 3660
},
{
"epoch": 20.97142857142857,
"grad_norm": 1.03125,
"learning_rate": 8e-05,
"loss": 0.1156,
"step": 3670
},
{
"epoch": 21.02857142857143,
"grad_norm": 12.4375,
"learning_rate": 8e-05,
"loss": 0.303,
"step": 3680
},
{
"epoch": 21.085714285714285,
"grad_norm": 12.0,
"learning_rate": 8e-05,
"loss": 0.284,
"step": 3690
},
{
"epoch": 21.142857142857142,
"grad_norm": 11.0625,
"learning_rate": 8e-05,
"loss": 0.2223,
"step": 3700
},
{
"epoch": 21.2,
"grad_norm": 12.625,
"learning_rate": 8e-05,
"loss": 0.1959,
"step": 3710
},
{
"epoch": 21.257142857142856,
"grad_norm": 13.375,
"learning_rate": 8e-05,
"loss": 0.2387,
"step": 3720
},
{
"epoch": 21.314285714285713,
"grad_norm": 1.3203125,
"learning_rate": 8e-05,
"loss": 0.1537,
"step": 3730
},
{
"epoch": 21.37142857142857,
"grad_norm": 1.296875,
"learning_rate": 8e-05,
"loss": 0.2123,
"step": 3740
},
{
"epoch": 21.428571428571427,
"grad_norm": 1.34375,
"learning_rate": 8e-05,
"loss": 0.2465,
"step": 3750
},
{
"epoch": 21.485714285714284,
"grad_norm": 13.0625,
"learning_rate": 8e-05,
"loss": 0.247,
"step": 3760
},
{
"epoch": 21.542857142857144,
"grad_norm": 1.0859375,
"learning_rate": 8e-05,
"loss": 0.2494,
"step": 3770
},
{
"epoch": 21.6,
"grad_norm": 2.296875,
"learning_rate": 8e-05,
"loss": 0.1628,
"step": 3780
},
{
"epoch": 21.65714285714286,
"grad_norm": 1.0703125,
"learning_rate": 8e-05,
"loss": 0.1647,
"step": 3790
},
{
"epoch": 21.714285714285715,
"grad_norm": 13.25,
"learning_rate": 8e-05,
"loss": 0.2749,
"step": 3800
},
{
"epoch": 21.771428571428572,
"grad_norm": 1.265625,
"learning_rate": 8e-05,
"loss": 0.2694,
"step": 3810
},
{
"epoch": 21.82857142857143,
"grad_norm": 12.375,
"learning_rate": 8e-05,
"loss": 0.3419,
"step": 3820
},
{
"epoch": 21.885714285714286,
"grad_norm": 1.546875,
"learning_rate": 8e-05,
"loss": 0.1667,
"step": 3830
},
{
"epoch": 21.942857142857143,
"grad_norm": 4.46875,
"learning_rate": 8e-05,
"loss": 0.3001,
"step": 3840
},
{
"epoch": 22.0,
"grad_norm": 0.76953125,
"learning_rate": 8e-05,
"loss": 0.2372,
"step": 3850
},
{
"epoch": 22.057142857142857,
"grad_norm": 12.625,
"learning_rate": 8e-05,
"loss": 0.2372,
"step": 3860
},
{
"epoch": 22.114285714285714,
"grad_norm": 1.890625,
"learning_rate": 8e-05,
"loss": 0.2566,
"step": 3870
},
{
"epoch": 22.17142857142857,
"grad_norm": 11.0625,
"learning_rate": 8e-05,
"loss": 0.1146,
"step": 3880
},
{
"epoch": 22.228571428571428,
"grad_norm": 1.28125,
"learning_rate": 8e-05,
"loss": 0.2228,
"step": 3890
},
{
"epoch": 22.285714285714285,
"grad_norm": 12.5,
"learning_rate": 8e-05,
"loss": 0.2839,
"step": 3900
},
{
"epoch": 22.34285714285714,
"grad_norm": 11.125,
"learning_rate": 8e-05,
"loss": 0.2664,
"step": 3910
},
{
"epoch": 22.4,
"grad_norm": 1.0625,
"learning_rate": 8e-05,
"loss": 0.2979,
"step": 3920
},
{
"epoch": 22.457142857142856,
"grad_norm": 12.9375,
"learning_rate": 8e-05,
"loss": 0.2924,
"step": 3930
},
{
"epoch": 22.514285714285712,
"grad_norm": 1.59375,
"learning_rate": 8e-05,
"loss": 0.201,
"step": 3940
},
{
"epoch": 22.571428571428573,
"grad_norm": 13.5,
"learning_rate": 8e-05,
"loss": 0.2611,
"step": 3950
},
{
"epoch": 22.62857142857143,
"grad_norm": 1.28125,
"learning_rate": 8e-05,
"loss": 0.1853,
"step": 3960
},
{
"epoch": 22.685714285714287,
"grad_norm": 2.125,
"learning_rate": 8e-05,
"loss": 0.1745,
"step": 3970
},
{
"epoch": 22.742857142857144,
"grad_norm": 1.0390625,
"learning_rate": 8e-05,
"loss": 0.1967,
"step": 3980
},
{
"epoch": 22.8,
"grad_norm": 11.1875,
"learning_rate": 8e-05,
"loss": 0.2362,
"step": 3990
},
{
"epoch": 22.857142857142858,
"grad_norm": 1.46875,
"learning_rate": 8e-05,
"loss": 0.1878,
"step": 4000
},
{
"epoch": 22.857142857142858,
"eval_loss": 8.872773170471191,
"eval_runtime": 7.8964,
"eval_samples_per_second": 63.32,
"eval_steps_per_second": 63.32,
"step": 4000
},
{
"epoch": 22.914285714285715,
"grad_norm": 13.125,
"learning_rate": 8e-05,
"loss": 0.3667,
"step": 4010
},
{
"epoch": 22.97142857142857,
"grad_norm": 1.0234375,
"learning_rate": 8e-05,
"loss": 0.2711,
"step": 4020
},
{
"epoch": 23.02857142857143,
"grad_norm": 12.125,
"learning_rate": 8e-05,
"loss": 0.2202,
"step": 4030
},
{
"epoch": 23.085714285714285,
"grad_norm": 11.75,
"learning_rate": 8e-05,
"loss": 0.23,
"step": 4040
},
{
"epoch": 23.142857142857142,
"grad_norm": 0.95703125,
"learning_rate": 8e-05,
"loss": 0.2866,
"step": 4050
},
{
"epoch": 23.2,
"grad_norm": 11.875,
"learning_rate": 8e-05,
"loss": 0.2623,
"step": 4060
},
{
"epoch": 23.257142857142856,
"grad_norm": 1.28125,
"learning_rate": 8e-05,
"loss": 0.1838,
"step": 4070
},
{
"epoch": 23.314285714285713,
"grad_norm": 10.125,
"learning_rate": 8e-05,
"loss": 0.2214,
"step": 4080
},
{
"epoch": 23.37142857142857,
"grad_norm": 1.25,
"learning_rate": 8e-05,
"loss": 0.3113,
"step": 4090
},
{
"epoch": 23.428571428571427,
"grad_norm": 0.90234375,
"learning_rate": 8e-05,
"loss": 0.1955,
"step": 4100
},
{
"epoch": 23.485714285714284,
"grad_norm": 1.5,
"learning_rate": 8e-05,
"loss": 0.1104,
"step": 4110
},
{
"epoch": 23.542857142857144,
"grad_norm": 13.625,
"learning_rate": 8e-05,
"loss": 0.2652,
"step": 4120
},
{
"epoch": 23.6,
"grad_norm": 0.9765625,
"learning_rate": 8e-05,
"loss": 0.3163,
"step": 4130
},
{
"epoch": 23.65714285714286,
"grad_norm": 12.8125,
"learning_rate": 8e-05,
"loss": 0.3631,
"step": 4140
},
{
"epoch": 23.714285714285715,
"grad_norm": 1.1171875,
"learning_rate": 8e-05,
"loss": 0.1891,
"step": 4150
},
{
"epoch": 23.771428571428572,
"grad_norm": 1.0859375,
"learning_rate": 8e-05,
"loss": 0.2511,
"step": 4160
},
{
"epoch": 23.82857142857143,
"grad_norm": 13.0625,
"learning_rate": 8e-05,
"loss": 0.2388,
"step": 4170
},
{
"epoch": 23.885714285714286,
"grad_norm": 2.015625,
"learning_rate": 8e-05,
"loss": 0.1704,
"step": 4180
},
{
"epoch": 23.942857142857143,
"grad_norm": 13.5,
"learning_rate": 8e-05,
"loss": 0.2243,
"step": 4190
},
{
"epoch": 24.0,
"grad_norm": 1.4765625,
"learning_rate": 8e-05,
"loss": 0.2253,
"step": 4200
},
{
"epoch": 24.057142857142857,
"grad_norm": 11.8125,
"learning_rate": 8e-05,
"loss": 0.1691,
"step": 4210
},
{
"epoch": 24.114285714285714,
"grad_norm": 11.3125,
"learning_rate": 8e-05,
"loss": 0.2168,
"step": 4220
},
{
"epoch": 24.17142857142857,
"grad_norm": 0.97265625,
"learning_rate": 8e-05,
"loss": 0.2336,
"step": 4230
},
{
"epoch": 24.228571428571428,
"grad_norm": 1.8671875,
"learning_rate": 8e-05,
"loss": 0.2058,
"step": 4240
},
{
"epoch": 24.285714285714285,
"grad_norm": 2.359375,
"learning_rate": 8e-05,
"loss": 0.1282,
"step": 4250
},
{
"epoch": 24.34285714285714,
"grad_norm": 1.359375,
"learning_rate": 8e-05,
"loss": 0.2631,
"step": 4260
},
{
"epoch": 24.4,
"grad_norm": 1.0703125,
"learning_rate": 8e-05,
"loss": 0.2444,
"step": 4270
},
{
"epoch": 24.457142857142856,
"grad_norm": 12.75,
"learning_rate": 8e-05,
"loss": 0.2777,
"step": 4280
},
{
"epoch": 24.514285714285712,
"grad_norm": 11.875,
"learning_rate": 8e-05,
"loss": 0.2395,
"step": 4290
},
{
"epoch": 24.571428571428573,
"grad_norm": 1.5625,
"learning_rate": 8e-05,
"loss": 0.2291,
"step": 4300
},
{
"epoch": 24.62857142857143,
"grad_norm": 2.375,
"learning_rate": 8e-05,
"loss": 0.3439,
"step": 4310
},
{
"epoch": 24.685714285714287,
"grad_norm": 14.0625,
"learning_rate": 8e-05,
"loss": 0.2126,
"step": 4320
},
{
"epoch": 24.742857142857144,
"grad_norm": 3.5625,
"learning_rate": 8e-05,
"loss": 0.2568,
"step": 4330
},
{
"epoch": 24.8,
"grad_norm": 13.1875,
"learning_rate": 8e-05,
"loss": 0.2513,
"step": 4340
},
{
"epoch": 24.857142857142858,
"grad_norm": 0.96875,
"learning_rate": 8e-05,
"loss": 0.2694,
"step": 4350
},
{
"epoch": 24.914285714285715,
"grad_norm": 1.890625,
"learning_rate": 8e-05,
"loss": 0.3128,
"step": 4360
},
{
"epoch": 24.97142857142857,
"grad_norm": 12.75,
"learning_rate": 8e-05,
"loss": 0.2509,
"step": 4370
},
{
"epoch": 25.02857142857143,
"grad_norm": 0.984375,
"learning_rate": 8e-05,
"loss": 0.2178,
"step": 4380
},
{
"epoch": 25.085714285714285,
"grad_norm": 13.25,
"learning_rate": 8e-05,
"loss": 0.2829,
"step": 4390
},
{
"epoch": 25.142857142857142,
"grad_norm": 12.6875,
"learning_rate": 8e-05,
"loss": 0.2785,
"step": 4400
},
{
"epoch": 25.2,
"grad_norm": 13.5625,
"learning_rate": 8e-05,
"loss": 0.2013,
"step": 4410
},
{
"epoch": 25.257142857142856,
"grad_norm": 12.5625,
"learning_rate": 8e-05,
"loss": 0.1823,
"step": 4420
},
{
"epoch": 25.314285714285713,
"grad_norm": 11.125,
"learning_rate": 8e-05,
"loss": 0.199,
"step": 4430
},
{
"epoch": 25.37142857142857,
"grad_norm": 0.84375,
"learning_rate": 8e-05,
"loss": 0.3073,
"step": 4440
},
{
"epoch": 25.428571428571427,
"grad_norm": 3.265625,
"learning_rate": 8e-05,
"loss": 0.2571,
"step": 4450
},
{
"epoch": 25.485714285714284,
"grad_norm": 1.546875,
"learning_rate": 8e-05,
"loss": 0.1958,
"step": 4460
},
{
"epoch": 25.542857142857144,
"grad_norm": 1.4765625,
"learning_rate": 8e-05,
"loss": 0.2315,
"step": 4470
},
{
"epoch": 25.6,
"grad_norm": 1.1171875,
"learning_rate": 8e-05,
"loss": 0.2363,
"step": 4480
},
{
"epoch": 25.65714285714286,
"grad_norm": 1.0703125,
"learning_rate": 8e-05,
"loss": 0.3238,
"step": 4490
},
{
"epoch": 25.714285714285715,
"grad_norm": 1.421875,
"learning_rate": 8e-05,
"loss": 0.2129,
"step": 4500
},
{
"epoch": 25.714285714285715,
"eval_loss": 9.115077018737793,
"eval_runtime": 7.8421,
"eval_samples_per_second": 63.759,
"eval_steps_per_second": 63.759,
"step": 4500
},
{
"epoch": 25.771428571428572,
"grad_norm": 0.875,
"learning_rate": 8e-05,
"loss": 0.1894,
"step": 4510
},
{
"epoch": 25.82857142857143,
"grad_norm": 1.3515625,
"learning_rate": 8e-05,
"loss": 0.1831,
"step": 4520
},
{
"epoch": 25.885714285714286,
"grad_norm": 12.6875,
"learning_rate": 8e-05,
"loss": 0.3132,
"step": 4530
},
{
"epoch": 25.942857142857143,
"grad_norm": 12.0,
"learning_rate": 8e-05,
"loss": 0.3162,
"step": 4540
},
{
"epoch": 26.0,
"grad_norm": 1.015625,
"learning_rate": 8e-05,
"loss": 0.2133,
"step": 4550
},
{
"epoch": 26.057142857142857,
"grad_norm": 2.0625,
"learning_rate": 8e-05,
"loss": 0.2219,
"step": 4560
},
{
"epoch": 26.114285714285714,
"grad_norm": 1.0703125,
"learning_rate": 8e-05,
"loss": 0.2104,
"step": 4570
},
{
"epoch": 26.17142857142857,
"grad_norm": 1.671875,
"learning_rate": 8e-05,
"loss": 0.2499,
"step": 4580
},
{
"epoch": 26.228571428571428,
"grad_norm": 0.88671875,
"learning_rate": 8e-05,
"loss": 0.2229,
"step": 4590
},
{
"epoch": 26.285714285714285,
"grad_norm": 14.0625,
"learning_rate": 8e-05,
"loss": 0.199,
"step": 4600
},
{
"epoch": 26.34285714285714,
"grad_norm": 14.25,
"learning_rate": 8e-05,
"loss": 0.2306,
"step": 4610
},
{
"epoch": 26.4,
"grad_norm": 13.0,
"learning_rate": 8e-05,
"loss": 0.2476,
"step": 4620
},
{
"epoch": 26.457142857142856,
"grad_norm": 12.875,
"learning_rate": 8e-05,
"loss": 0.2139,
"step": 4630
},
{
"epoch": 26.514285714285712,
"grad_norm": 1.9375,
"learning_rate": 8e-05,
"loss": 0.216,
"step": 4640
},
{
"epoch": 26.571428571428573,
"grad_norm": 0.8828125,
"learning_rate": 8e-05,
"loss": 0.1741,
"step": 4650
},
{
"epoch": 26.62857142857143,
"grad_norm": 1.0078125,
"learning_rate": 8e-05,
"loss": 0.2203,
"step": 4660
},
{
"epoch": 26.685714285714287,
"grad_norm": 11.5625,
"learning_rate": 8e-05,
"loss": 0.3017,
"step": 4670
},
{
"epoch": 26.742857142857144,
"grad_norm": 2.15625,
"learning_rate": 8e-05,
"loss": 0.2818,
"step": 4680
},
{
"epoch": 26.8,
"grad_norm": 12.1875,
"learning_rate": 8e-05,
"loss": 0.2493,
"step": 4690
},
{
"epoch": 26.857142857142858,
"grad_norm": 1.125,
"learning_rate": 8e-05,
"loss": 0.2621,
"step": 4700
},
{
"epoch": 26.914285714285715,
"grad_norm": 0.87109375,
"learning_rate": 8e-05,
"loss": 0.2187,
"step": 4710
},
{
"epoch": 26.97142857142857,
"grad_norm": 14.375,
"learning_rate": 8e-05,
"loss": 0.3442,
"step": 4720
},
{
"epoch": 27.02857142857143,
"grad_norm": 2.5,
"learning_rate": 8e-05,
"loss": 0.1336,
"step": 4730
},
{
"epoch": 27.085714285714285,
"grad_norm": 0.9765625,
"learning_rate": 8e-05,
"loss": 0.2898,
"step": 4740
},
{
"epoch": 27.142857142857142,
"grad_norm": 12.3125,
"learning_rate": 8e-05,
"loss": 0.3381,
"step": 4750
},
{
"epoch": 27.2,
"grad_norm": 0.890625,
"learning_rate": 8e-05,
"loss": 0.1815,
"step": 4760
},
{
"epoch": 27.257142857142856,
"grad_norm": 10.5625,
"learning_rate": 8e-05,
"loss": 0.1338,
"step": 4770
},
{
"epoch": 27.314285714285713,
"grad_norm": 1.7734375,
"learning_rate": 8e-05,
"loss": 0.2167,
"step": 4780
},
{
"epoch": 27.37142857142857,
"grad_norm": 1.28125,
"learning_rate": 8e-05,
"loss": 0.253,
"step": 4790
},
{
"epoch": 27.428571428571427,
"grad_norm": 10.875,
"learning_rate": 8e-05,
"loss": 0.2037,
"step": 4800
},
{
"epoch": 27.485714285714284,
"grad_norm": 0.734375,
"learning_rate": 8e-05,
"loss": 0.2611,
"step": 4810
},
{
"epoch": 27.542857142857144,
"grad_norm": 13.0,
"learning_rate": 8e-05,
"loss": 0.1021,
"step": 4820
},
{
"epoch": 27.6,
"grad_norm": 12.4375,
"learning_rate": 8e-05,
"loss": 0.2804,
"step": 4830
},
{
"epoch": 27.65714285714286,
"grad_norm": 1.7421875,
"learning_rate": 8e-05,
"loss": 0.2014,
"step": 4840
},
{
"epoch": 27.714285714285715,
"grad_norm": 9.5625,
"learning_rate": 8e-05,
"loss": 0.1588,
"step": 4850
},
{
"epoch": 27.771428571428572,
"grad_norm": 2.4375,
"learning_rate": 8e-05,
"loss": 0.3445,
"step": 4860
},
{
"epoch": 27.82857142857143,
"grad_norm": 1.7578125,
"learning_rate": 8e-05,
"loss": 0.1854,
"step": 4870
},
{
"epoch": 27.885714285714286,
"grad_norm": 11.75,
"learning_rate": 8e-05,
"loss": 0.2785,
"step": 4880
},
{
"epoch": 27.942857142857143,
"grad_norm": 13.1875,
"learning_rate": 8e-05,
"loss": 0.2414,
"step": 4890
},
{
"epoch": 28.0,
"grad_norm": 12.5,
"learning_rate": 8e-05,
"loss": 0.3524,
"step": 4900
}
],
"logging_steps": 10,
"max_steps": 7000,
"num_input_tokens_seen": 0,
"num_train_epochs": 40,
"save_steps": 175,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3134793563897856.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}