{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 34.014,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005,
"grad_norm": 824.0,
"learning_rate": 1.0000000000000002e-06,
"loss": 7.1435,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 81.0,
"learning_rate": 2.0000000000000003e-06,
"loss": 6.2505,
"step": 20
},
{
"epoch": 0.015,
"grad_norm": 68.0,
"learning_rate": 3e-06,
"loss": 5.427,
"step": 30
},
{
"epoch": 0.02,
"grad_norm": 28.0,
"learning_rate": 4.000000000000001e-06,
"loss": 4.7885,
"step": 40
},
{
"epoch": 0.025,
"grad_norm": 18.0,
"learning_rate": 5e-06,
"loss": 4.6918,
"step": 50
},
{
"epoch": 1.001,
"grad_norm": 20.5,
"learning_rate": 6e-06,
"loss": 4.5434,
"step": 60
},
{
"epoch": 1.006,
"grad_norm": 15.25,
"learning_rate": 7e-06,
"loss": 4.2003,
"step": 70
},
{
"epoch": 1.011,
"grad_norm": 59.0,
"learning_rate": 8.000000000000001e-06,
"loss": 3.9924,
"step": 80
},
{
"epoch": 1.016,
"grad_norm": 15.0625,
"learning_rate": 9e-06,
"loss": 4.1841,
"step": 90
},
{
"epoch": 1.021,
"grad_norm": 12.9375,
"learning_rate": 1e-05,
"loss": 3.9335,
"step": 100
},
{
"epoch": 1.026,
"grad_norm": 13.8125,
"learning_rate": 1.1000000000000001e-05,
"loss": 3.7487,
"step": 110
},
{
"epoch": 2.002,
"grad_norm": 13.875,
"learning_rate": 1.2e-05,
"loss": 3.5918,
"step": 120
},
{
"epoch": 2.007,
"grad_norm": 13.6875,
"learning_rate": 1.3000000000000001e-05,
"loss": 3.2549,
"step": 130
},
{
"epoch": 2.012,
"grad_norm": 13.75,
"learning_rate": 1.4e-05,
"loss": 3.1041,
"step": 140
},
{
"epoch": 2.017,
"grad_norm": 15.5,
"learning_rate": 1.5000000000000002e-05,
"loss": 2.9491,
"step": 150
},
{
"epoch": 2.022,
"grad_norm": 11.1875,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.7196,
"step": 160
},
{
"epoch": 2.027,
"grad_norm": 26.75,
"learning_rate": 1.7e-05,
"loss": 2.4995,
"step": 170
},
{
"epoch": 3.003,
"grad_norm": 13.3125,
"learning_rate": 1.8e-05,
"loss": 2.3773,
"step": 180
},
{
"epoch": 3.008,
"grad_norm": 15.0625,
"learning_rate": 1.9e-05,
"loss": 2.4258,
"step": 190
},
{
"epoch": 3.013,
"grad_norm": 16.75,
"learning_rate": 2e-05,
"loss": 1.905,
"step": 200
},
{
"epoch": 3.018,
"grad_norm": 11.875,
"learning_rate": 1.9998476951563914e-05,
"loss": 1.8328,
"step": 210
},
{
"epoch": 3.023,
"grad_norm": 12.1875,
"learning_rate": 1.999390827019096e-05,
"loss": 1.5704,
"step": 220
},
{
"epoch": 3.028,
"grad_norm": 13.8125,
"learning_rate": 1.9986295347545738e-05,
"loss": 1.4726,
"step": 230
},
{
"epoch": 4.004,
"grad_norm": 20.875,
"learning_rate": 1.9975640502598243e-05,
"loss": 1.288,
"step": 240
},
{
"epoch": 4.009,
"grad_norm": 11.8125,
"learning_rate": 1.9961946980917457e-05,
"loss": 1.24,
"step": 250
},
{
"epoch": 4.014,
"grad_norm": 18.75,
"learning_rate": 1.9945218953682736e-05,
"loss": 1.0592,
"step": 260
},
{
"epoch": 4.019,
"grad_norm": 16.375,
"learning_rate": 1.9925461516413224e-05,
"loss": 1.0111,
"step": 270
},
{
"epoch": 4.024,
"grad_norm": 12.5625,
"learning_rate": 1.9902680687415704e-05,
"loss": 0.9064,
"step": 280
},
{
"epoch": 4.029,
"grad_norm": 10.6875,
"learning_rate": 1.9876883405951378e-05,
"loss": 0.8366,
"step": 290
},
{
"epoch": 5.005,
"grad_norm": 10.5625,
"learning_rate": 1.9848077530122083e-05,
"loss": 0.7961,
"step": 300
},
{
"epoch": 5.01,
"grad_norm": 26.75,
"learning_rate": 1.9816271834476642e-05,
"loss": 0.7599,
"step": 310
},
{
"epoch": 5.015,
"grad_norm": 9.1875,
"learning_rate": 1.9781476007338058e-05,
"loss": 0.6633,
"step": 320
},
{
"epoch": 5.02,
"grad_norm": 9.625,
"learning_rate": 1.9743700647852356e-05,
"loss": 0.5835,
"step": 330
},
{
"epoch": 5.025,
"grad_norm": 10.5625,
"learning_rate": 1.9702957262759964e-05,
"loss": 0.5481,
"step": 340
},
{
"epoch": 6.001,
"grad_norm": 13.5,
"learning_rate": 1.9659258262890683e-05,
"loss": 0.543,
"step": 350
},
{
"epoch": 6.006,
"grad_norm": 9.1875,
"learning_rate": 1.961261695938319e-05,
"loss": 0.478,
"step": 360
},
{
"epoch": 6.011,
"grad_norm": 6.90625,
"learning_rate": 1.9563047559630356e-05,
"loss": 0.4664,
"step": 370
},
{
"epoch": 6.016,
"grad_norm": 9.1875,
"learning_rate": 1.9510565162951538e-05,
"loss": 0.4349,
"step": 380
},
{
"epoch": 6.021,
"grad_norm": 7.9375,
"learning_rate": 1.945518575599317e-05,
"loss": 0.4401,
"step": 390
},
{
"epoch": 6.026,
"grad_norm": 8.8125,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.375,
"step": 400
},
{
"epoch": 7.002,
"grad_norm": 10.625,
"learning_rate": 1.9335804264972018e-05,
"loss": 0.3819,
"step": 410
},
{
"epoch": 7.007,
"grad_norm": 7.09375,
"learning_rate": 1.9271838545667876e-05,
"loss": 0.364,
"step": 420
},
{
"epoch": 7.012,
"grad_norm": 9.75,
"learning_rate": 1.9205048534524405e-05,
"loss": 0.3542,
"step": 430
},
{
"epoch": 7.017,
"grad_norm": 8.1875,
"learning_rate": 1.913545457642601e-05,
"loss": 0.3226,
"step": 440
},
{
"epoch": 7.022,
"grad_norm": 6.5,
"learning_rate": 1.9063077870366504e-05,
"loss": 0.3167,
"step": 450
},
{
"epoch": 7.027,
"grad_norm": 8.875,
"learning_rate": 1.8987940462991673e-05,
"loss": 0.2857,
"step": 460
},
{
"epoch": 8.003,
"grad_norm": 7.375,
"learning_rate": 1.891006524188368e-05,
"loss": 0.2994,
"step": 470
},
{
"epoch": 8.008,
"grad_norm": 8.375,
"learning_rate": 1.8829475928589272e-05,
"loss": 0.2817,
"step": 480
},
{
"epoch": 8.013,
"grad_norm": 6.8125,
"learning_rate": 1.874619707139396e-05,
"loss": 0.2868,
"step": 490
},
{
"epoch": 8.018,
"grad_norm": 5.6875,
"learning_rate": 1.866025403784439e-05,
"loss": 0.2581,
"step": 500
},
{
"epoch": 8.023,
"grad_norm": 5.875,
"learning_rate": 1.8571673007021124e-05,
"loss": 0.254,
"step": 510
},
{
"epoch": 8.028,
"grad_norm": 6.875,
"learning_rate": 1.848048096156426e-05,
"loss": 0.2543,
"step": 520
},
{
"epoch": 9.004,
"grad_norm": 6.1875,
"learning_rate": 1.8386705679454243e-05,
"loss": 0.2272,
"step": 530
},
{
"epoch": 9.009,
"grad_norm": 6.96875,
"learning_rate": 1.8290375725550417e-05,
"loss": 0.2322,
"step": 540
},
{
"epoch": 9.014,
"grad_norm": 4.84375,
"learning_rate": 1.819152044288992e-05,
"loss": 0.2201,
"step": 550
},
{
"epoch": 9.019,
"grad_norm": 7.96875,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.2236,
"step": 560
},
{
"epoch": 9.024,
"grad_norm": 4.90625,
"learning_rate": 1.798635510047293e-05,
"loss": 0.2062,
"step": 570
},
{
"epoch": 9.029,
"grad_norm": 4.59375,
"learning_rate": 1.788010753606722e-05,
"loss": 0.2069,
"step": 580
},
{
"epoch": 10.005,
"grad_norm": 4.5625,
"learning_rate": 1.777145961456971e-05,
"loss": 0.1945,
"step": 590
},
{
"epoch": 10.01,
"grad_norm": 7.46875,
"learning_rate": 1.766044443118978e-05,
"loss": 0.2031,
"step": 600
},
{
"epoch": 10.015,
"grad_norm": 4.375,
"learning_rate": 1.7547095802227723e-05,
"loss": 0.1885,
"step": 610
},
{
"epoch": 10.02,
"grad_norm": 3.9375,
"learning_rate": 1.7431448254773943e-05,
"loss": 0.1831,
"step": 620
},
{
"epoch": 10.025,
"grad_norm": 5.84375,
"learning_rate": 1.7313537016191706e-05,
"loss": 0.1771,
"step": 630
},
{
"epoch": 11.001,
"grad_norm": 5.1875,
"learning_rate": 1.7193398003386514e-05,
"loss": 0.1791,
"step": 640
},
{
"epoch": 11.006,
"grad_norm": 4.8125,
"learning_rate": 1.7071067811865477e-05,
"loss": 0.1731,
"step": 650
},
{
"epoch": 11.011,
"grad_norm": 3.578125,
"learning_rate": 1.6946583704589973e-05,
"loss": 0.1716,
"step": 660
},
{
"epoch": 11.016,
"grad_norm": 7.875,
"learning_rate": 1.6819983600624986e-05,
"loss": 0.1665,
"step": 670
},
{
"epoch": 11.021,
"grad_norm": 3.984375,
"learning_rate": 1.6691306063588583e-05,
"loss": 0.1701,
"step": 680
},
{
"epoch": 11.026,
"grad_norm": 4.40625,
"learning_rate": 1.6560590289905074e-05,
"loss": 0.1796,
"step": 690
},
{
"epoch": 12.002,
"grad_norm": 12.9375,
"learning_rate": 1.6427876096865394e-05,
"loss": 0.1782,
"step": 700
},
{
"epoch": 12.007,
"grad_norm": 2.96875,
"learning_rate": 1.6293203910498375e-05,
"loss": 0.1539,
"step": 710
},
{
"epoch": 12.012,
"grad_norm": 4.34375,
"learning_rate": 1.6156614753256583e-05,
"loss": 0.1549,
"step": 720
},
{
"epoch": 12.017,
"grad_norm": 4.5625,
"learning_rate": 1.6018150231520486e-05,
"loss": 0.1505,
"step": 730
},
{
"epoch": 12.022,
"grad_norm": 3.5,
"learning_rate": 1.5877852522924733e-05,
"loss": 0.1533,
"step": 740
},
{
"epoch": 12.027,
"grad_norm": 4.6875,
"learning_rate": 1.573576436351046e-05,
"loss": 0.1468,
"step": 750
},
{
"epoch": 13.003,
"grad_norm": 3.296875,
"learning_rate": 1.5591929034707468e-05,
"loss": 0.1409,
"step": 760
},
{
"epoch": 13.008,
"grad_norm": 5.65625,
"learning_rate": 1.5446390350150272e-05,
"loss": 0.1363,
"step": 770
},
{
"epoch": 13.013,
"grad_norm": 4.1875,
"learning_rate": 1.529919264233205e-05,
"loss": 0.1422,
"step": 780
},
{
"epoch": 13.018,
"grad_norm": 2.53125,
"learning_rate": 1.5150380749100545e-05,
"loss": 0.1366,
"step": 790
},
{
"epoch": 13.023,
"grad_norm": 3.546875,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.138,
"step": 800
},
{
"epoch": 13.028,
"grad_norm": 4.625,
"learning_rate": 1.4848096202463373e-05,
"loss": 0.1365,
"step": 810
},
{
"epoch": 14.004,
"grad_norm": 4.875,
"learning_rate": 1.469471562785891e-05,
"loss": 0.1323,
"step": 820
},
{
"epoch": 14.009,
"grad_norm": 4.6875,
"learning_rate": 1.4539904997395468e-05,
"loss": 0.1309,
"step": 830
},
{
"epoch": 14.014,
"grad_norm": 2.25,
"learning_rate": 1.4383711467890776e-05,
"loss": 0.1335,
"step": 840
},
{
"epoch": 14.019,
"grad_norm": 4.125,
"learning_rate": 1.4226182617406996e-05,
"loss": 0.127,
"step": 850
},
{
"epoch": 14.024,
"grad_norm": 3.390625,
"learning_rate": 1.4067366430758004e-05,
"loss": 0.1247,
"step": 860
},
{
"epoch": 14.029,
"grad_norm": 2.9375,
"learning_rate": 1.3907311284892737e-05,
"loss": 0.1297,
"step": 870
},
{
"epoch": 15.005,
"grad_norm": 2.140625,
"learning_rate": 1.3746065934159123e-05,
"loss": 0.1238,
"step": 880
},
{
"epoch": 15.01,
"grad_norm": 3.375,
"learning_rate": 1.3583679495453e-05,
"loss": 0.1249,
"step": 890
},
{
"epoch": 15.015,
"grad_norm": 3.0,
"learning_rate": 1.342020143325669e-05,
"loss": 0.1228,
"step": 900
},
{
"epoch": 15.02,
"grad_norm": 2.1875,
"learning_rate": 1.3255681544571568e-05,
"loss": 0.1173,
"step": 910
},
{
"epoch": 15.025,
"grad_norm": 1.96875,
"learning_rate": 1.3090169943749475e-05,
"loss": 0.119,
"step": 920
},
{
"epoch": 16.001,
"grad_norm": 2.796875,
"learning_rate": 1.2923717047227368e-05,
"loss": 0.1166,
"step": 930
},
{
"epoch": 16.006,
"grad_norm": 2.4375,
"learning_rate": 1.2756373558169992e-05,
"loss": 0.1152,
"step": 940
},
{
"epoch": 16.011,
"grad_norm": 2.359375,
"learning_rate": 1.2588190451025209e-05,
"loss": 0.1161,
"step": 950
},
{
"epoch": 16.016,
"grad_norm": 2.90625,
"learning_rate": 1.2419218955996677e-05,
"loss": 0.1118,
"step": 960
},
{
"epoch": 16.021,
"grad_norm": 2.5,
"learning_rate": 1.2249510543438652e-05,
"loss": 0.1131,
"step": 970
},
{
"epoch": 16.026,
"grad_norm": 2.03125,
"learning_rate": 1.2079116908177592e-05,
"loss": 0.1088,
"step": 980
},
{
"epoch": 17.002,
"grad_norm": 3.109375,
"learning_rate": 1.190808995376545e-05,
"loss": 0.108,
"step": 990
},
{
"epoch": 17.007,
"grad_norm": 2.375,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.1079,
"step": 1000
},
{
"epoch": 17.012,
"grad_norm": 2.96875,
"learning_rate": 1.156434465040231e-05,
"loss": 0.1107,
"step": 1010
},
{
"epoch": 17.017,
"grad_norm": 2.984375,
"learning_rate": 1.1391731009600655e-05,
"loss": 0.1067,
"step": 1020
},
{
"epoch": 17.022,
"grad_norm": 1.8125,
"learning_rate": 1.1218693434051475e-05,
"loss": 0.1053,
"step": 1030
},
{
"epoch": 17.027,
"grad_norm": 4.1875,
"learning_rate": 1.1045284632676535e-05,
"loss": 0.103,
"step": 1040
},
{
"epoch": 18.003,
"grad_norm": 2.0625,
"learning_rate": 1.0871557427476585e-05,
"loss": 0.1066,
"step": 1050
},
{
"epoch": 18.008,
"grad_norm": 2.203125,
"learning_rate": 1.0697564737441254e-05,
"loss": 0.1009,
"step": 1060
},
{
"epoch": 18.013,
"grad_norm": 2.921875,
"learning_rate": 1.0523359562429441e-05,
"loss": 0.0999,
"step": 1070
},
{
"epoch": 18.018,
"grad_norm": 1.8984375,
"learning_rate": 1.0348994967025012e-05,
"loss": 0.096,
"step": 1080
},
{
"epoch": 18.023,
"grad_norm": 1.9375,
"learning_rate": 1.0174524064372837e-05,
"loss": 0.0981,
"step": 1090
},
{
"epoch": 18.028,
"grad_norm": 2.578125,
"learning_rate": 1e-05,
"loss": 0.1008,
"step": 1100
},
{
"epoch": 19.004,
"grad_norm": 2.5625,
"learning_rate": 9.825475935627165e-06,
"loss": 0.0965,
"step": 1110
},
{
"epoch": 19.009,
"grad_norm": 1.8671875,
"learning_rate": 9.651005032974994e-06,
"loss": 0.0976,
"step": 1120
},
{
"epoch": 19.014,
"grad_norm": 2.234375,
"learning_rate": 9.476640437570562e-06,
"loss": 0.0956,
"step": 1130
},
{
"epoch": 19.019,
"grad_norm": 2.9375,
"learning_rate": 9.302435262558748e-06,
"loss": 0.0949,
"step": 1140
},
{
"epoch": 19.024,
"grad_norm": 1.5,
"learning_rate": 9.128442572523418e-06,
"loss": 0.0912,
"step": 1150
},
{
"epoch": 19.029,
"grad_norm": 1.734375,
"learning_rate": 8.954715367323468e-06,
"loss": 0.0925,
"step": 1160
},
{
"epoch": 20.005,
"grad_norm": 1.3828125,
"learning_rate": 8.781306565948528e-06,
"loss": 0.0909,
"step": 1170
},
{
"epoch": 20.01,
"grad_norm": 1.6171875,
"learning_rate": 8.60826899039935e-06,
"loss": 0.0934,
"step": 1180
},
{
"epoch": 20.015,
"grad_norm": 2.0625,
"learning_rate": 8.43565534959769e-06,
"loss": 0.0914,
"step": 1190
},
{
"epoch": 20.02,
"grad_norm": 1.15625,
"learning_rate": 8.263518223330698e-06,
"loss": 0.0903,
"step": 1200
},
{
"epoch": 20.025,
"grad_norm": 1.890625,
"learning_rate": 8.091910046234552e-06,
"loss": 0.0883,
"step": 1210
},
{
"epoch": 21.001,
"grad_norm": 1.890625,
"learning_rate": 7.92088309182241e-06,
"loss": 0.0914,
"step": 1220
},
{
"epoch": 21.006,
"grad_norm": 1.84375,
"learning_rate": 7.750489456561351e-06,
"loss": 0.0899,
"step": 1230
},
{
"epoch": 21.011,
"grad_norm": 2.640625,
"learning_rate": 7.580781044003324e-06,
"loss": 0.0886,
"step": 1240
},
{
"epoch": 21.016,
"grad_norm": 2.15625,
"learning_rate": 7.411809548974792e-06,
"loss": 0.0895,
"step": 1250
},
{
"epoch": 21.021,
"grad_norm": 1.546875,
"learning_rate": 7.243626441830009e-06,
"loss": 0.088,
"step": 1260
},
{
"epoch": 21.026,
"grad_norm": 1.828125,
"learning_rate": 7.076282952772634e-06,
"loss": 0.0856,
"step": 1270
},
{
"epoch": 22.002,
"grad_norm": 1.7109375,
"learning_rate": 6.909830056250527e-06,
"loss": 0.0902,
"step": 1280
},
{
"epoch": 22.007,
"grad_norm": 1.46875,
"learning_rate": 6.744318455428436e-06,
"loss": 0.0852,
"step": 1290
},
{
"epoch": 22.012,
"grad_norm": 1.9765625,
"learning_rate": 6.579798566743314e-06,
"loss": 0.0869,
"step": 1300
},
{
"epoch": 22.017,
"grad_norm": 2.359375,
"learning_rate": 6.4163205045469975e-06,
"loss": 0.0859,
"step": 1310
},
{
"epoch": 22.022,
"grad_norm": 1.2890625,
"learning_rate": 6.25393406584088e-06,
"loss": 0.0858,
"step": 1320
},
{
"epoch": 22.027,
"grad_norm": 1.6640625,
"learning_rate": 6.092688715107265e-06,
"loss": 0.0851,
"step": 1330
},
{
"epoch": 23.003,
"grad_norm": 1.671875,
"learning_rate": 5.932633569242e-06,
"loss": 0.0882,
"step": 1340
},
{
"epoch": 23.008,
"grad_norm": 2.234375,
"learning_rate": 5.773817382593008e-06,
"loss": 0.0827,
"step": 1350
},
{
"epoch": 23.013,
"grad_norm": 1.125,
"learning_rate": 5.616288532109225e-06,
"loss": 0.0848,
"step": 1360
},
{
"epoch": 23.018,
"grad_norm": 1.4296875,
"learning_rate": 5.460095002604533e-06,
"loss": 0.0833,
"step": 1370
},
{
"epoch": 23.023,
"grad_norm": 1.703125,
"learning_rate": 5.305284372141095e-06,
"loss": 0.0839,
"step": 1380
},
{
"epoch": 23.028,
"grad_norm": 2.1875,
"learning_rate": 5.151903797536631e-06,
"loss": 0.0848,
"step": 1390
},
{
"epoch": 24.004,
"grad_norm": 1.7421875,
"learning_rate": 5.000000000000003e-06,
"loss": 0.0858,
"step": 1400
},
{
"epoch": 24.009,
"grad_norm": 1.265625,
"learning_rate": 4.849619250899458e-06,
"loss": 0.082,
"step": 1410
},
{
"epoch": 24.014,
"grad_norm": 1.953125,
"learning_rate": 4.700807357667953e-06,
"loss": 0.084,
"step": 1420
},
{
"epoch": 24.019,
"grad_norm": 1.75,
"learning_rate": 4.5536096498497295e-06,
"loss": 0.0843,
"step": 1430
},
{
"epoch": 24.024,
"grad_norm": 2.046875,
"learning_rate": 4.408070965292534e-06,
"loss": 0.0819,
"step": 1440
},
{
"epoch": 24.029,
"grad_norm": 1.6015625,
"learning_rate": 4.264235636489542e-06,
"loss": 0.0837,
"step": 1450
},
{
"epoch": 25.005,
"grad_norm": 1.296875,
"learning_rate": 4.12214747707527e-06,
"loss": 0.0834,
"step": 1460
},
{
"epoch": 25.01,
"grad_norm": 2.15625,
"learning_rate": 3.981849768479516e-06,
"loss": 0.0834,
"step": 1470
},
{
"epoch": 25.015,
"grad_norm": 1.6484375,
"learning_rate": 3.8433852467434175e-06,
"loss": 0.0813,
"step": 1480
},
{
"epoch": 25.02,
"grad_norm": 1.0234375,
"learning_rate": 3.7067960895016277e-06,
"loss": 0.081,
"step": 1490
},
{
"epoch": 25.025,
"grad_norm": 1.390625,
"learning_rate": 3.5721239031346067e-06,
"loss": 0.0821,
"step": 1500
},
{
"epoch": 26.001,
"grad_norm": 2.625,
"learning_rate": 3.4394097100949286e-06,
"loss": 0.0821,
"step": 1510
},
{
"epoch": 26.006,
"grad_norm": 1.171875,
"learning_rate": 3.308693936411421e-06,
"loss": 0.0833,
"step": 1520
},
{
"epoch": 26.011,
"grad_norm": 1.015625,
"learning_rate": 3.1800163993750166e-06,
"loss": 0.0814,
"step": 1530
},
{
"epoch": 26.016,
"grad_norm": 1.8515625,
"learning_rate": 3.0534162954100264e-06,
"loss": 0.0806,
"step": 1540
},
{
"epoch": 26.021,
"grad_norm": 1.0859375,
"learning_rate": 2.9289321881345257e-06,
"loss": 0.0802,
"step": 1550
},
{
"epoch": 26.026,
"grad_norm": 1.2421875,
"learning_rate": 2.8066019966134907e-06,
"loss": 0.0815,
"step": 1560
},
{
"epoch": 27.002,
"grad_norm": 1.2890625,
"learning_rate": 2.6864629838082957e-06,
"loss": 0.0814,
"step": 1570
},
{
"epoch": 27.007,
"grad_norm": 1.0703125,
"learning_rate": 2.5685517452260566e-06,
"loss": 0.0811,
"step": 1580
},
{
"epoch": 27.012,
"grad_norm": 1.640625,
"learning_rate": 2.45290419777228e-06,
"loss": 0.0815,
"step": 1590
},
{
"epoch": 27.017,
"grad_norm": 1.015625,
"learning_rate": 2.339555568810221e-06,
"loss": 0.0792,
"step": 1600
},
{
"epoch": 27.022,
"grad_norm": 1.171875,
"learning_rate": 2.2285403854302912e-06,
"loss": 0.0811,
"step": 1610
},
{
"epoch": 27.027,
"grad_norm": 1.59375,
"learning_rate": 2.119892463932781e-06,
"loss": 0.0808,
"step": 1620
},
{
"epoch": 28.003,
"grad_norm": 1.421875,
"learning_rate": 2.013644899527074e-06,
"loss": 0.081,
"step": 1630
},
{
"epoch": 28.008,
"grad_norm": 2.046875,
"learning_rate": 1.9098300562505266e-06,
"loss": 0.0804,
"step": 1640
},
{
"epoch": 28.013,
"grad_norm": 2.0,
"learning_rate": 1.808479557110081e-06,
"loss": 0.0814,
"step": 1650
},
{
"epoch": 28.018,
"grad_norm": 2.5,
"learning_rate": 1.709624274449584e-06,
"loss": 0.0798,
"step": 1660
},
{
"epoch": 28.023,
"grad_norm": 1.25,
"learning_rate": 1.6132943205457607e-06,
"loss": 0.0801,
"step": 1670
},
{
"epoch": 28.028,
"grad_norm": 1.5703125,
"learning_rate": 1.5195190384357405e-06,
"loss": 0.0815,
"step": 1680
},
{
"epoch": 29.004,
"grad_norm": 1.5078125,
"learning_rate": 1.4283269929788779e-06,
"loss": 0.0798,
"step": 1690
},
{
"epoch": 29.009,
"grad_norm": 1.0703125,
"learning_rate": 1.339745962155613e-06,
"loss": 0.0791,
"step": 1700
},
{
"epoch": 29.014,
"grad_norm": 1.515625,
"learning_rate": 1.2538029286060428e-06,
"loss": 0.0806,
"step": 1710
},
{
"epoch": 29.019,
"grad_norm": 1.453125,
"learning_rate": 1.1705240714107301e-06,
"loss": 0.0805,
"step": 1720
},
{
"epoch": 29.024,
"grad_norm": 1.875,
"learning_rate": 1.0899347581163222e-06,
"loss": 0.0796,
"step": 1730
},
{
"epoch": 29.029,
"grad_norm": 1.21875,
"learning_rate": 1.012059537008332e-06,
"loss": 0.0804,
"step": 1740
},
{
"epoch": 30.005,
"grad_norm": 1.0859375,
"learning_rate": 9.369221296335007e-07,
"loss": 0.0802,
"step": 1750
},
{
"epoch": 30.01,
"grad_norm": 1.546875,
"learning_rate": 8.645454235739903e-07,
"loss": 0.0801,
"step": 1760
},
{
"epoch": 30.015,
"grad_norm": 0.8984375,
"learning_rate": 7.949514654755963e-07,
"loss": 0.0798,
"step": 1770
},
{
"epoch": 30.02,
"grad_norm": 1.109375,
"learning_rate": 7.281614543321269e-07,
"loss": 0.0814,
"step": 1780
},
{
"epoch": 30.025,
"grad_norm": 1.328125,
"learning_rate": 6.641957350279838e-07,
"loss": 0.0789,
"step": 1790
},
{
"epoch": 31.001,
"grad_norm": 1.4296875,
"learning_rate": 6.030737921409169e-07,
"loss": 0.0792,
"step": 1800
},
{
"epoch": 31.006,
"grad_norm": 1.3046875,
"learning_rate": 5.448142440068316e-07,
"loss": 0.0798,
"step": 1810
},
{
"epoch": 31.011,
"grad_norm": 0.9140625,
"learning_rate": 4.894348370484648e-07,
"loss": 0.0812,
"step": 1820
},
{
"epoch": 31.016,
"grad_norm": 1.453125,
"learning_rate": 4.3695244036964567e-07,
"loss": 0.0792,
"step": 1830
},
{
"epoch": 31.021,
"grad_norm": 1.0234375,
"learning_rate": 3.8738304061681107e-07,
"loss": 0.0786,
"step": 1840
},
{
"epoch": 31.026,
"grad_norm": 1.71875,
"learning_rate": 3.4074173710931804e-07,
"loss": 0.0797,
"step": 1850
},
{
"epoch": 32.002,
"grad_norm": 2.859375,
"learning_rate": 2.970427372400353e-07,
"loss": 0.0794,
"step": 1860
},
{
"epoch": 32.007,
"grad_norm": 1.0078125,
"learning_rate": 2.5629935214764866e-07,
"loss": 0.0788,
"step": 1870
},
{
"epoch": 32.012,
"grad_norm": 1.5859375,
"learning_rate": 2.1852399266194312e-07,
"loss": 0.0806,
"step": 1880
},
{
"epoch": 32.017,
"grad_norm": 1.0703125,
"learning_rate": 1.8372816552336025e-07,
"loss": 0.0794,
"step": 1890
},
{
"epoch": 32.022,
"grad_norm": 0.95703125,
"learning_rate": 1.519224698779198e-07,
"loss": 0.0802,
"step": 1900
},
{
"epoch": 32.027,
"grad_norm": 1.2734375,
"learning_rate": 1.231165940486234e-07,
"loss": 0.0793,
"step": 1910
},
{
"epoch": 33.003,
"grad_norm": 1.578125,
"learning_rate": 9.731931258429638e-08,
"loss": 0.08,
"step": 1920
},
{
"epoch": 33.008,
"grad_norm": 2.09375,
"learning_rate": 7.453848358678018e-08,
"loss": 0.0797,
"step": 1930
},
{
"epoch": 33.013,
"grad_norm": 1.6953125,
"learning_rate": 5.4781046317267103e-08,
"loss": 0.0807,
"step": 1940
},
{
"epoch": 33.018,
"grad_norm": 1.6015625,
"learning_rate": 3.805301908254455e-08,
"loss": 0.0784,
"step": 1950
},
{
"epoch": 33.023,
"grad_norm": 1.7109375,
"learning_rate": 2.4359497401758026e-08,
"loss": 0.0793,
"step": 1960
},
{
"epoch": 33.028,
"grad_norm": 1.6796875,
"learning_rate": 1.370465245426167e-08,
"loss": 0.0807,
"step": 1970
},
{
"epoch": 34.004,
"grad_norm": 1.265625,
"learning_rate": 6.091729809042379e-09,
"loss": 0.0795,
"step": 1980
},
{
"epoch": 34.009,
"grad_norm": 1.6796875,
"learning_rate": 1.5230484360873043e-09,
"loss": 0.0798,
"step": 1990
},
{
"epoch": 34.014,
"grad_norm": 0.984375,
"learning_rate": 0.0,
"loss": 0.0811,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.9122666762298982e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}