{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005,
"grad_norm": 7.65625,
"learning_rate": 4.5000000000000003e-07,
"loss": 0.3232,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 4.71875,
"learning_rate": 9.500000000000001e-07,
"loss": 0.285,
"step": 20
},
{
"epoch": 0.015,
"grad_norm": 2.578125,
"learning_rate": 1.45e-06,
"loss": 0.2412,
"step": 30
},
{
"epoch": 0.02,
"grad_norm": 2.046875,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.2292,
"step": 40
},
{
"epoch": 0.025,
"grad_norm": 1.5390625,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.2119,
"step": 50
},
{
"epoch": 0.03,
"grad_norm": 1.4765625,
"learning_rate": 2.95e-06,
"loss": 0.2054,
"step": 60
},
{
"epoch": 0.035,
"grad_norm": 1.6484375,
"learning_rate": 3.45e-06,
"loss": 0.205,
"step": 70
},
{
"epoch": 0.04,
"grad_norm": 3.15625,
"learning_rate": 3.95e-06,
"loss": 0.1893,
"step": 80
},
{
"epoch": 0.045,
"grad_norm": 1.6875,
"learning_rate": 4.450000000000001e-06,
"loss": 0.181,
"step": 90
},
{
"epoch": 0.05,
"grad_norm": 1.1015625,
"learning_rate": 4.95e-06,
"loss": 0.1848,
"step": 100
},
{
"epoch": 0.055,
"grad_norm": 2.375,
"learning_rate": 5.450000000000001e-06,
"loss": 0.1875,
"step": 110
},
{
"epoch": 0.06,
"grad_norm": 13.875,
"learning_rate": 5.950000000000001e-06,
"loss": 0.1799,
"step": 120
},
{
"epoch": 0.065,
"grad_norm": 13.4375,
"learning_rate": 6.450000000000001e-06,
"loss": 0.1806,
"step": 130
},
{
"epoch": 0.07,
"grad_norm": 8.625,
"learning_rate": 6.95e-06,
"loss": 0.1815,
"step": 140
},
{
"epoch": 0.075,
"grad_norm": 3.75,
"learning_rate": 7.450000000000001e-06,
"loss": 0.1918,
"step": 150
},
{
"epoch": 0.08,
"grad_norm": 6.65625,
"learning_rate": 7.950000000000002e-06,
"loss": 0.1877,
"step": 160
},
{
"epoch": 0.085,
"grad_norm": 1.0859375,
"learning_rate": 8.45e-06,
"loss": 0.1948,
"step": 170
},
{
"epoch": 0.09,
"grad_norm": 66.5,
"learning_rate": 8.95e-06,
"loss": 0.1751,
"step": 180
},
{
"epoch": 0.095,
"grad_norm": 106.0,
"learning_rate": 9.450000000000001e-06,
"loss": 0.1802,
"step": 190
},
{
"epoch": 0.1,
"grad_norm": 5.90625,
"learning_rate": 9.950000000000001e-06,
"loss": 0.176,
"step": 200
},
{
"epoch": 0.105,
"grad_norm": 29.125,
"learning_rate": 9.999383162408303e-06,
"loss": 0.1758,
"step": 210
},
{
"epoch": 0.11,
"grad_norm": 57.5,
"learning_rate": 9.997251079708788e-06,
"loss": 0.1643,
"step": 220
},
{
"epoch": 0.115,
"grad_norm": 48.0,
"learning_rate": 9.993596785920932e-06,
"loss": 0.163,
"step": 230
},
{
"epoch": 0.12,
"grad_norm": 12.9375,
"learning_rate": 9.988421394178027e-06,
"loss": 0.1813,
"step": 240
},
{
"epoch": 0.125,
"grad_norm": 0.921875,
"learning_rate": 9.981726480954532e-06,
"loss": 0.1661,
"step": 250
},
{
"epoch": 0.13,
"grad_norm": 0.7109375,
"learning_rate": 9.973514085585871e-06,
"loss": 0.1708,
"step": 260
},
{
"epoch": 0.135,
"grad_norm": 0.6953125,
"learning_rate": 9.963786709647228e-06,
"loss": 0.1671,
"step": 270
},
{
"epoch": 0.14,
"grad_norm": 272.0,
"learning_rate": 9.952547316191545e-06,
"loss": 0.1554,
"step": 280
},
{
"epoch": 0.145,
"grad_norm": 6.90625,
"learning_rate": 9.939799328846947e-06,
"loss": 0.1525,
"step": 290
},
{
"epoch": 0.15,
"grad_norm": 0.69921875,
"learning_rate": 9.92554663077387e-06,
"loss": 0.1676,
"step": 300
},
{
"epoch": 0.155,
"grad_norm": 36.0,
"learning_rate": 9.90979356348222e-06,
"loss": 0.1685,
"step": 310
},
{
"epoch": 0.16,
"grad_norm": 11.9375,
"learning_rate": 9.892544925508894e-06,
"loss": 0.1595,
"step": 320
},
{
"epoch": 0.165,
"grad_norm": 99.0,
"learning_rate": 9.87380597095611e-06,
"loss": 0.1662,
"step": 330
},
{
"epoch": 0.17,
"grad_norm": 57.5,
"learning_rate": 9.853582407890954e-06,
"loss": 0.1706,
"step": 340
},
{
"epoch": 0.175,
"grad_norm": 25.125,
"learning_rate": 9.831880396606649e-06,
"loss": 0.1602,
"step": 350
},
{
"epoch": 0.18,
"grad_norm": 85.5,
"learning_rate": 9.808706547746057e-06,
"loss": 0.1659,
"step": 360
},
{
"epoch": 0.185,
"grad_norm": 12.9375,
"learning_rate": 9.78406792028804e-06,
"loss": 0.1559,
"step": 370
},
{
"epoch": 0.19,
"grad_norm": 55.5,
"learning_rate": 9.757972019397192e-06,
"loss": 0.1681,
"step": 380
},
{
"epoch": 0.195,
"grad_norm": 0.71484375,
"learning_rate": 9.730426794137727e-06,
"loss": 0.1666,
"step": 390
},
{
"epoch": 0.2,
"grad_norm": 0.73828125,
"learning_rate": 9.701440635052094e-06,
"loss": 0.1599,
"step": 400
},
{
"epoch": 0.205,
"grad_norm": 0.703125,
"learning_rate": 9.671022371605148e-06,
"loss": 0.1643,
"step": 410
},
{
"epoch": 0.21,
"grad_norm": 0.671875,
"learning_rate": 9.6391812694946e-06,
"loss": 0.1482,
"step": 420
},
{
"epoch": 0.215,
"grad_norm": 10.9375,
"learning_rate": 9.605927027828608e-06,
"loss": 0.1483,
"step": 430
},
{
"epoch": 0.22,
"grad_norm": 33.75,
"learning_rate": 9.571269776171319e-06,
"loss": 0.1548,
"step": 440
},
{
"epoch": 0.225,
"grad_norm": 6.59375,
"learning_rate": 9.535220071457325e-06,
"loss": 0.1401,
"step": 450
},
{
"epoch": 0.23,
"grad_norm": 20.625,
"learning_rate": 9.497788894775903e-06,
"loss": 0.1427,
"step": 460
},
{
"epoch": 0.235,
"grad_norm": 6.03125,
"learning_rate": 9.458987648026071e-06,
"loss": 0.1464,
"step": 470
},
{
"epoch": 0.24,
"grad_norm": 3.703125,
"learning_rate": 9.418828150443469e-06,
"loss": 0.1409,
"step": 480
},
{
"epoch": 0.245,
"grad_norm": 0.734375,
"learning_rate": 9.37732263500009e-06,
"loss": 0.1522,
"step": 490
},
{
"epoch": 0.25,
"grad_norm": 0.6640625,
"learning_rate": 9.334483744678015e-06,
"loss": 0.1586,
"step": 500
},
{
"epoch": 0.255,
"grad_norm": 0.73046875,
"learning_rate": 9.290324528618225e-06,
"loss": 0.1518,
"step": 510
},
{
"epoch": 0.26,
"grad_norm": 1.375,
"learning_rate": 9.244858438145709e-06,
"loss": 0.1648,
"step": 520
},
{
"epoch": 0.265,
"grad_norm": 2.21875,
"learning_rate": 9.198099322672066e-06,
"loss": 0.1529,
"step": 530
},
{
"epoch": 0.27,
"grad_norm": 25.625,
"learning_rate": 9.150061425476839e-06,
"loss": 0.1521,
"step": 540
},
{
"epoch": 0.275,
"grad_norm": 22.5,
"learning_rate": 9.100759379368863e-06,
"loss": 0.1612,
"step": 550
},
{
"epoch": 0.28,
"grad_norm": 0.75390625,
"learning_rate": 9.050208202228981e-06,
"loss": 0.171,
"step": 560
},
{
"epoch": 0.285,
"grad_norm": 225.0,
"learning_rate": 8.998423292435455e-06,
"loss": 0.1437,
"step": 570
},
{
"epoch": 0.29,
"grad_norm": 6.40625,
"learning_rate": 8.945420424173455e-06,
"loss": 0.1524,
"step": 580
},
{
"epoch": 0.295,
"grad_norm": 13.4375,
"learning_rate": 8.891215742630106e-06,
"loss": 0.1257,
"step": 590
},
{
"epoch": 0.3,
"grad_norm": 1.09375,
"learning_rate": 8.8358257590765e-06,
"loss": 0.1348,
"step": 600
},
{
"epoch": 0.305,
"grad_norm": 48.5,
"learning_rate": 8.779267345838198e-06,
"loss": 0.1396,
"step": 610
},
{
"epoch": 0.31,
"grad_norm": 0.5703125,
"learning_rate": 8.72155773115577e-06,
"loss": 0.1345,
"step": 620
},
{
"epoch": 0.315,
"grad_norm": 0.796875,
"learning_rate": 8.662714493936895e-06,
"loss": 0.1675,
"step": 630
},
{
"epoch": 0.32,
"grad_norm": 0.62109375,
"learning_rate": 8.602755558401653e-06,
"loss": 0.1451,
"step": 640
},
{
"epoch": 0.325,
"grad_norm": 0.734375,
"learning_rate": 8.541699188622645e-06,
"loss": 0.1328,
"step": 650
},
{
"epoch": 0.33,
"grad_norm": 80.5,
"learning_rate": 8.479563982961572e-06,
"loss": 0.1443,
"step": 660
},
{
"epoch": 0.335,
"grad_norm": 2.75,
"learning_rate": 8.416368868403997e-06,
"loss": 0.1348,
"step": 670
},
{
"epoch": 0.34,
"grad_norm": 40.0,
"learning_rate": 8.352133094793996e-06,
"loss": 0.1315,
"step": 680
},
{
"epoch": 0.345,
"grad_norm": 31.0,
"learning_rate": 8.28687622897048e-06,
"loss": 0.1359,
"step": 690
},
{
"epoch": 0.35,
"grad_norm": 0.70703125,
"learning_rate": 8.220618148806934e-06,
"loss": 0.145,
"step": 700
},
{
"epoch": 0.355,
"grad_norm": 7.5625,
"learning_rate": 8.153379037156433e-06,
"loss": 0.1619,
"step": 710
},
{
"epoch": 0.36,
"grad_norm": 1.9140625,
"learning_rate": 8.085179375703745e-06,
"loss": 0.1393,
"step": 720
},
{
"epoch": 0.365,
"grad_norm": 14.0625,
"learning_rate": 8.016039938726413e-06,
"loss": 0.1495,
"step": 730
},
{
"epoch": 0.37,
"grad_norm": 0.640625,
"learning_rate": 7.945981786766712e-06,
"loss": 0.1356,
"step": 740
},
{
"epoch": 0.375,
"grad_norm": 0.6953125,
"learning_rate": 7.875026260216395e-06,
"loss": 0.1548,
"step": 750
},
{
"epoch": 0.38,
"grad_norm": 3.296875,
"learning_rate": 7.80319497281621e-06,
"loss": 0.1436,
"step": 760
},
{
"epoch": 0.385,
"grad_norm": 0.6875,
"learning_rate": 7.730509805072146e-06,
"loss": 0.1369,
"step": 770
},
{
"epoch": 0.39,
"grad_norm": 0.60546875,
"learning_rate": 7.656992897590416e-06,
"loss": 0.1196,
"step": 780
},
{
"epoch": 0.395,
"grad_norm": 0.65234375,
"learning_rate": 7.58266664433321e-06,
"loss": 0.1295,
"step": 790
},
{
"epoch": 0.4,
"grad_norm": 44.0,
"learning_rate": 7.507553685797288e-06,
"loss": 0.1048,
"step": 800
},
{
"epoch": 0.405,
"grad_norm": 3.109375,
"learning_rate": 7.431676902117453e-06,
"loss": 0.1345,
"step": 810
},
{
"epoch": 0.41,
"grad_norm": 1.40625,
"learning_rate": 7.35505940609705e-06,
"loss": 0.1286,
"step": 820
},
{
"epoch": 0.415,
"grad_norm": 0.59765625,
"learning_rate": 7.2777245361675786e-06,
"loss": 0.1197,
"step": 830
},
{
"epoch": 0.42,
"grad_norm": 0.62890625,
"learning_rate": 7.199695849279576e-06,
"loss": 0.1242,
"step": 840
},
{
"epoch": 0.425,
"grad_norm": 12.5625,
"learning_rate": 7.120997113726951e-06,
"loss": 0.1275,
"step": 850
},
{
"epoch": 0.43,
"grad_norm": 0.73046875,
"learning_rate": 7.041652301906925e-06,
"loss": 0.1331,
"step": 860
},
{
"epoch": 0.435,
"grad_norm": 0.62109375,
"learning_rate": 6.961685583017808e-06,
"loss": 0.1286,
"step": 870
},
{
"epoch": 0.44,
"grad_norm": 0.5625,
"learning_rate": 6.881121315696828e-06,
"loss": 0.1348,
"step": 880
},
{
"epoch": 0.445,
"grad_norm": 278.0,
"learning_rate": 6.799984040600257e-06,
"loss": 0.1358,
"step": 890
},
{
"epoch": 0.45,
"grad_norm": 4.3125,
"learning_rate": 6.718298472928082e-06,
"loss": 0.127,
"step": 900
},
{
"epoch": 0.455,
"grad_norm": 0.63671875,
"learning_rate": 6.63608949489552e-06,
"loss": 0.1317,
"step": 910
},
{
"epoch": 0.46,
"grad_norm": 184.0,
"learning_rate": 6.55338214815366e-06,
"loss": 0.1283,
"step": 920
},
{
"epoch": 0.465,
"grad_norm": 7.125,
"learning_rate": 6.47020162616152e-06,
"loss": 0.1306,
"step": 930
},
{
"epoch": 0.47,
"grad_norm": 0.65234375,
"learning_rate": 6.386573266511891e-06,
"loss": 0.1369,
"step": 940
},
{
"epoch": 0.475,
"grad_norm": 0.59375,
"learning_rate": 6.3025225432132434e-06,
"loss": 0.109,
"step": 950
},
{
"epoch": 0.48,
"grad_norm": 3.671875,
"learning_rate": 6.218075058930113e-06,
"loss": 0.1213,
"step": 960
},
{
"epoch": 0.485,
"grad_norm": 2.203125,
"learning_rate": 6.133256537184276e-06,
"loss": 0.1237,
"step": 970
},
{
"epoch": 0.49,
"grad_norm": 0.66796875,
"learning_rate": 6.048092814519109e-06,
"loss": 0.1121,
"step": 980
},
{
"epoch": 0.495,
"grad_norm": 2.90625,
"learning_rate": 5.962609832629538e-06,
"loss": 0.1065,
"step": 990
},
{
"epoch": 0.5,
"grad_norm": 11.5,
"learning_rate": 5.876833630459936e-06,
"loss": 0.1054,
"step": 1000
},
{
"epoch": 0.505,
"grad_norm": 1208.0,
"learning_rate": 5.7907903362724195e-06,
"loss": 0.1123,
"step": 1010
},
{
"epoch": 0.51,
"grad_norm": 0.66015625,
"learning_rate": 5.704506159687914e-06,
"loss": 0.1317,
"step": 1020
},
{
"epoch": 0.515,
"grad_norm": 0.69921875,
"learning_rate": 5.618007383702464e-06,
"loss": 0.1168,
"step": 1030
},
{
"epoch": 0.52,
"grad_norm": 0.5390625,
"learning_rate": 5.5313203566811666e-06,
"loss": 0.1157,
"step": 1040
},
{
"epoch": 0.525,
"grad_norm": 0.6171875,
"learning_rate": 5.4444714843322085e-06,
"loss": 0.1085,
"step": 1050
},
{
"epoch": 0.53,
"grad_norm": 0.68359375,
"learning_rate": 5.35748722166343e-06,
"loss": 0.1309,
"step": 1060
},
{
"epoch": 0.535,
"grad_norm": 1064.0,
"learning_rate": 5.270394064923878e-06,
"loss": 0.1137,
"step": 1070
},
{
"epoch": 0.54,
"grad_norm": 0.56640625,
"learning_rate": 5.183218543532782e-06,
"loss": 0.1291,
"step": 1080
},
{
"epoch": 0.545,
"grad_norm": 1.34375,
"learning_rate": 5.09598721199845e-06,
"loss": 0.1225,
"step": 1090
},
{
"epoch": 0.55,
"grad_norm": 1744.0,
"learning_rate": 5.008726641829492e-06,
"loss": 0.1157,
"step": 1100
},
{
"epoch": 0.555,
"grad_norm": 0.6015625,
"learning_rate": 4.921463413440898e-06,
"loss": 0.1181,
"step": 1110
},
{
"epoch": 0.56,
"grad_norm": 0.5078125,
"learning_rate": 4.8342241080573696e-06,
"loss": 0.1171,
"step": 1120
},
{
"epoch": 0.565,
"grad_norm": 0.796875,
"learning_rate": 4.747035299616434e-06,
"loss": 0.1225,
"step": 1130
},
{
"epoch": 0.57,
"grad_norm": 0.41796875,
"learning_rate": 4.659923546673761e-06,
"loss": 0.1147,
"step": 1140
},
{
"epoch": 0.575,
"grad_norm": 0.390625,
"learning_rate": 4.572915384313163e-06,
"loss": 0.0917,
"step": 1150
},
{
"epoch": 0.58,
"grad_norm": 0.59375,
"learning_rate": 4.4860373160637665e-06,
"loss": 0.1165,
"step": 1160
},
{
"epoch": 0.585,
"grad_norm": 0.423828125,
"learning_rate": 4.399315805826765e-06,
"loss": 0.1047,
"step": 1170
},
{
"epoch": 0.59,
"grad_norm": 0.8046875,
"learning_rate": 4.312777269814268e-06,
"loss": 0.107,
"step": 1180
},
{
"epoch": 0.595,
"grad_norm": 0.42578125,
"learning_rate": 4.226448068502661e-06,
"loss": 0.0948,
"step": 1190
},
{
"epoch": 0.6,
"grad_norm": 45.5,
"learning_rate": 4.140354498602952e-06,
"loss": 0.105,
"step": 1200
},
{
"epoch": 0.605,
"grad_norm": 0.61328125,
"learning_rate": 4.054522785050543e-06,
"loss": 0.1044,
"step": 1210
},
{
"epoch": 0.61,
"grad_norm": 1.40625,
"learning_rate": 3.968979073016853e-06,
"loss": 0.1084,
"step": 1220
},
{
"epoch": 0.615,
"grad_norm": 0.60546875,
"learning_rate": 3.883749419945244e-06,
"loss": 0.1227,
"step": 1230
},
{
"epoch": 0.62,
"grad_norm": 0.458984375,
"learning_rate": 3.798859787613682e-06,
"loss": 0.1048,
"step": 1240
},
{
"epoch": 0.625,
"grad_norm": 0.58203125,
"learning_rate": 3.7143360342265206e-06,
"loss": 0.1101,
"step": 1250
},
{
"epoch": 0.63,
"grad_norm": 0.5390625,
"learning_rate": 3.630203906537838e-06,
"loss": 0.1292,
"step": 1260
},
{
"epoch": 0.635,
"grad_norm": 0.54296875,
"learning_rate": 3.5464890320087374e-06,
"loss": 0.113,
"step": 1270
},
{
"epoch": 0.64,
"grad_norm": 0.625,
"learning_rate": 3.463216911000965e-06,
"loss": 0.1209,
"step": 1280
},
{
"epoch": 0.645,
"grad_norm": 0.53515625,
"learning_rate": 3.3804129090092542e-06,
"loss": 0.1129,
"step": 1290
},
{
"epoch": 0.65,
"grad_norm": 0.5234375,
"learning_rate": 3.2981022489347503e-06,
"loss": 0.0997,
"step": 1300
},
{
"epoch": 0.655,
"grad_norm": 0.44140625,
"learning_rate": 3.2163100034018735e-06,
"loss": 0.1102,
"step": 1310
},
{
"epoch": 0.66,
"grad_norm": 0.58984375,
"learning_rate": 3.1350610871209553e-06,
"loss": 0.1036,
"step": 1320
},
{
"epoch": 0.665,
"grad_norm": 1232.0,
"learning_rate": 3.0543802492989693e-06,
"loss": 0.1196,
"step": 1330
},
{
"epoch": 0.67,
"grad_norm": 0.42578125,
"learning_rate": 2.974292066100688e-06,
"loss": 0.0981,
"step": 1340
},
{
"epoch": 0.675,
"grad_norm": 0.39453125,
"learning_rate": 2.8948209331625454e-06,
"loss": 0.0964,
"step": 1350
},
{
"epoch": 0.68,
"grad_norm": 0.4375,
"learning_rate": 2.8159910581614904e-06,
"loss": 0.1111,
"step": 1360
},
{
"epoch": 0.685,
"grad_norm": 0.419921875,
"learning_rate": 2.7378264534410865e-06,
"loss": 0.121,
"step": 1370
},
{
"epoch": 0.69,
"grad_norm": 0.53125,
"learning_rate": 2.6603509286971342e-06,
"loss": 0.1113,
"step": 1380
},
{
"epoch": 0.695,
"grad_norm": 0.5078125,
"learning_rate": 2.5835880837249884e-06,
"loss": 0.1156,
"step": 1390
},
{
"epoch": 0.7,
"grad_norm": 0.5234375,
"learning_rate": 2.507561301230849e-06,
"loss": 0.1158,
"step": 1400
},
{
"epoch": 0.705,
"grad_norm": 0.5546875,
"learning_rate": 2.432293739709151e-06,
"loss": 0.1078,
"step": 1410
},
{
"epoch": 0.71,
"grad_norm": 0.458984375,
"learning_rate": 2.357808326388265e-06,
"loss": 0.1191,
"step": 1420
},
{
"epoch": 0.715,
"grad_norm": 1272.0,
"learning_rate": 2.284127750246646e-06,
"loss": 0.1097,
"step": 1430
},
{
"epoch": 0.72,
"grad_norm": 0.45703125,
"learning_rate": 2.2112744551015496e-06,
"loss": 0.0934,
"step": 1440
},
{
"epoch": 0.725,
"grad_norm": 0.416015625,
"learning_rate": 2.13927063277242e-06,
"loss": 0.108,
"step": 1450
},
{
"epoch": 0.73,
"grad_norm": 0.474609375,
"learning_rate": 2.0681382163210533e-06,
"loss": 0.1103,
"step": 1460
},
{
"epoch": 0.735,
"grad_norm": 25.375,
"learning_rate": 1.9978988733705807e-06,
"loss": 0.1024,
"step": 1470
},
{
"epoch": 0.74,
"grad_norm": 0.4765625,
"learning_rate": 1.928573999505284e-06,
"loss": 0.1053,
"step": 1480
},
{
"epoch": 0.745,
"grad_norm": 0.3828125,
"learning_rate": 1.8601847117533112e-06,
"loss": 0.0983,
"step": 1490
},
{
"epoch": 0.75,
"grad_norm": 0.404296875,
"learning_rate": 1.7927518421542106e-06,
"loss": 0.112,
"step": 1500
},
{
"epoch": 0.755,
"grad_norm": 20.0,
"learning_rate": 1.7262959314133015e-06,
"loss": 0.1054,
"step": 1510
},
{
"epoch": 0.76,
"grad_norm": 0.451171875,
"learning_rate": 1.6608372226447678e-06,
"loss": 0.1233,
"step": 1520
},
{
"epoch": 0.765,
"grad_norm": 0.57421875,
"learning_rate": 1.596395655205411e-06,
"loss": 0.1035,
"step": 1530
},
{
"epoch": 0.77,
"grad_norm": 0.45703125,
"learning_rate": 1.5329908586209347e-06,
"loss": 0.1088,
"step": 1540
},
{
"epoch": 0.775,
"grad_norm": 0.4375,
"learning_rate": 1.4706421466065952e-06,
"loss": 0.0909,
"step": 1550
},
{
"epoch": 0.78,
"grad_norm": 0.4921875,
"learning_rate": 1.4093685111840567e-06,
"loss": 0.1082,
"step": 1560
},
{
"epoch": 0.785,
"grad_norm": 0.51953125,
"learning_rate": 1.349188616896238e-06,
"loss": 0.1099,
"step": 1570
},
{
"epoch": 0.79,
"grad_norm": 0.4453125,
"learning_rate": 1.2901207951219186e-06,
"loss": 0.1093,
"step": 1580
},
{
"epoch": 0.795,
"grad_norm": 0.439453125,
"learning_rate": 1.2321830384918116e-06,
"loss": 0.1081,
"step": 1590
},
{
"epoch": 0.8,
"grad_norm": 0.470703125,
"learning_rate": 1.1753929954078414e-06,
"loss": 0.0944,
"step": 1600
},
{
"epoch": 0.805,
"grad_norm": 0.44140625,
"learning_rate": 1.1197679646672698e-06,
"loss": 0.1087,
"step": 1610
},
{
"epoch": 0.81,
"grad_norm": 0.3828125,
"learning_rate": 1.065324890193314e-06,
"loss": 0.1035,
"step": 1620
},
{
"epoch": 0.815,
"grad_norm": 0.37109375,
"learning_rate": 1.0120803558738585e-06,
"loss": 0.1019,
"step": 1630
},
{
"epoch": 0.82,
"grad_norm": 0.4453125,
"learning_rate": 9.600505805098486e-07,
"loss": 0.1028,
"step": 1640
},
{
"epoch": 0.825,
"grad_norm": 0.447265625,
"learning_rate": 9.09251412874882e-07,
"loss": 0.123,
"step": 1650
},
{
"epoch": 0.83,
"grad_norm": 0.4375,
"learning_rate": 8.596983268875281e-07,
"loss": 0.1013,
"step": 1660
},
{
"epoch": 0.835,
"grad_norm": 0.484375,
"learning_rate": 8.114064168978064e-07,
"loss": 0.1039,
"step": 1670
},
{
"epoch": 0.84,
"grad_norm": 0.48828125,
"learning_rate": 7.643903930893154e-07,
"loss": 0.1038,
"step": 1680
},
{
"epoch": 0.845,
"grad_norm": 0.45703125,
"learning_rate": 7.186645769983591e-07,
"loss": 0.0997,
"step": 1690
},
{
"epoch": 0.85,
"grad_norm": 0.33203125,
"learning_rate": 6.742428971514786e-07,
"loss": 0.1018,
"step": 1700
},
{
"epoch": 0.855,
"grad_norm": 0.4375,
"learning_rate": 6.311388848226741e-07,
"loss": 0.1103,
"step": 1710
},
{
"epoch": 0.86,
"grad_norm": 0.42578125,
"learning_rate": 5.893656699116618e-07,
"loss": 0.1045,
"step": 1720
},
{
"epoch": 0.865,
"grad_norm": 0.431640625,
"learning_rate": 5.489359769443675e-07,
"loss": 0.1129,
"step": 1730
},
{
"epoch": 0.87,
"grad_norm": 0.546875,
"learning_rate": 5.098621211969224e-07,
"loss": 0.1161,
"step": 1740
},
{
"epoch": 0.875,
"grad_norm": 0.345703125,
"learning_rate": 4.72156004944303e-07,
"loss": 0.1061,
"step": 1750
},
{
"epoch": 0.88,
"grad_norm": 0.376953125,
"learning_rate": 4.3582911383478646e-07,
"loss": 0.1022,
"step": 1760
},
{
"epoch": 0.885,
"grad_norm": 0.546875,
"learning_rate": 4.0089251339131164e-07,
"loss": 0.109,
"step": 1770
},
{
"epoch": 0.89,
"grad_norm": 6.6875,
"learning_rate": 3.6735684564081385e-07,
"loss": 0.1044,
"step": 1780
},
{
"epoch": 0.895,
"grad_norm": 0.412109375,
"learning_rate": 3.352323258725554e-07,
"loss": 0.103,
"step": 1790
},
{
"epoch": 0.9,
"grad_norm": 0.41796875,
"learning_rate": 3.0452873952645455e-07,
"loss": 0.1174,
"step": 1800
},
{
"epoch": 0.905,
"grad_norm": 2.3125,
"learning_rate": 2.752554392123463e-07,
"loss": 0.1116,
"step": 1810
},
{
"epoch": 0.91,
"grad_norm": 0.447265625,
"learning_rate": 2.474213418610816e-07,
"loss": 0.1121,
"step": 1820
},
{
"epoch": 0.915,
"grad_norm": 6.21875,
"learning_rate": 2.210349260083494e-07,
"loss": 0.11,
"step": 1830
},
{
"epoch": 0.92,
"grad_norm": 0.453125,
"learning_rate": 1.961042292120291e-07,
"loss": 0.1096,
"step": 1840
},
{
"epoch": 0.925,
"grad_norm": 1096.0,
"learning_rate": 1.7263684560387518e-07,
"loss": 0.1173,
"step": 1850
},
{
"epoch": 0.93,
"grad_norm": 0.515625,
"learning_rate": 1.5063992357626623e-07,
"loss": 0.1093,
"step": 1860
},
{
"epoch": 0.935,
"grad_norm": 0.64453125,
"learning_rate": 1.3012016360474223e-07,
"loss": 0.1025,
"step": 1870
},
{
"epoch": 0.94,
"grad_norm": 0.421875,
"learning_rate": 1.1108381620696885e-07,
"loss": 0.1176,
"step": 1880
},
{
"epoch": 0.945,
"grad_norm": 0.41796875,
"learning_rate": 9.353668003877437e-08,
"loss": 0.1005,
"step": 1890
},
{
"epoch": 0.95,
"grad_norm": 0.44140625,
"learning_rate": 7.748410012781705e-08,
"loss": 0.1127,
"step": 1900
},
{
"epoch": 0.955,
"grad_norm": 0.384765625,
"learning_rate": 6.293096624544304e-08,
"loss": 0.0938,
"step": 1910
},
{
"epoch": 0.96,
"grad_norm": 0.45703125,
"learning_rate": 4.988171141721232e-08,
"loss": 0.1102,
"step": 1920
},
{
"epoch": 0.965,
"grad_norm": 0.31640625,
"learning_rate": 3.83403105725566e-08,
"loss": 0.0989,
"step": 1930
},
{
"epoch": 0.97,
"grad_norm": 0.52734375,
"learning_rate": 2.8310279333976786e-08,
"loss": 0.1033,
"step": 1940
},
{
"epoch": 0.975,
"grad_norm": 0.408203125,
"learning_rate": 1.9794672946152337e-08,
"loss": 0.0999,
"step": 1950
},
{
"epoch": 0.98,
"grad_norm": 0.48828125,
"learning_rate": 1.2796085345280207e-08,
"loss": 0.1143,
"step": 1960
},
{
"epoch": 0.985,
"grad_norm": 740.0,
"learning_rate": 7.3166483689413035e-09,
"loss": 0.11,
"step": 1970
},
{
"epoch": 0.99,
"grad_norm": 0.44921875,
"learning_rate": 3.3580311067188396e-09,
"loss": 0.1156,
"step": 1980
},
{
"epoch": 0.995,
"grad_norm": 77.5,
"learning_rate": 9.214393917789111e-10,
"loss": 0.11,
"step": 1990
},
{
"epoch": 1.0,
"grad_norm": 0.46875,
"learning_rate": 7.615433561536379e-12,
"loss": 0.1143,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1121122028290048e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}