{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.34325,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005,
"grad_norm": 10.3125,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.2492,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 11.0,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.3168,
"step": 20
},
{
"epoch": 0.015,
"grad_norm": 8.9375,
"learning_rate": 3e-06,
"loss": 1.3578,
"step": 30
},
{
"epoch": 0.02,
"grad_norm": 10.1875,
"learning_rate": 4.000000000000001e-06,
"loss": 1.2434,
"step": 40
},
{
"epoch": 0.025,
"grad_norm": 9.5625,
"learning_rate": 5e-06,
"loss": 1.2461,
"step": 50
},
{
"epoch": 0.03,
"grad_norm": 7.4375,
"learning_rate": 6e-06,
"loss": 1.1131,
"step": 60
},
{
"epoch": 0.035,
"grad_norm": 11.6875,
"learning_rate": 7e-06,
"loss": 1.1562,
"step": 70
},
{
"epoch": 0.04,
"grad_norm": 8.6875,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2025,
"step": 80
},
{
"epoch": 0.045,
"grad_norm": 6.875,
"learning_rate": 9e-06,
"loss": 1.1477,
"step": 90
},
{
"epoch": 0.05,
"grad_norm": 6.90625,
"learning_rate": 1e-05,
"loss": 1.0277,
"step": 100
},
{
"epoch": 0.055,
"grad_norm": 8.25,
"learning_rate": 1.1000000000000001e-05,
"loss": 1.0428,
"step": 110
},
{
"epoch": 0.06,
"grad_norm": 7.78125,
"learning_rate": 1.2e-05,
"loss": 0.9688,
"step": 120
},
{
"epoch": 0.065,
"grad_norm": 6.71875,
"learning_rate": 1.3000000000000001e-05,
"loss": 0.9908,
"step": 130
},
{
"epoch": 0.07,
"grad_norm": 8.125,
"learning_rate": 1.4e-05,
"loss": 1.0992,
"step": 140
},
{
"epoch": 0.075,
"grad_norm": 8.4375,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.9598,
"step": 150
},
{
"epoch": 0.08,
"grad_norm": 9.25,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.9885,
"step": 160
},
{
"epoch": 0.085,
"grad_norm": 13.875,
"learning_rate": 1.7e-05,
"loss": 1.2307,
"step": 170
},
{
"epoch": 0.09,
"grad_norm": 9.3125,
"learning_rate": 1.8e-05,
"loss": 0.9137,
"step": 180
},
{
"epoch": 0.095,
"grad_norm": 9.25,
"learning_rate": 1.9e-05,
"loss": 1.5107,
"step": 190
},
{
"epoch": 0.1,
"grad_norm": 96.0,
"learning_rate": 2e-05,
"loss": 0.9955,
"step": 200
},
{
"epoch": 0.105,
"grad_norm": 26.25,
"learning_rate": 1.9998476951563914e-05,
"loss": 1.6652,
"step": 210
},
{
"epoch": 0.11,
"grad_norm": 7.21875,
"learning_rate": 1.999390827019096e-05,
"loss": 0.86,
"step": 220
},
{
"epoch": 0.115,
"grad_norm": 56.0,
"learning_rate": 1.9986295347545738e-05,
"loss": 1.9418,
"step": 230
},
{
"epoch": 0.12,
"grad_norm": 11.625,
"learning_rate": 1.9975640502598243e-05,
"loss": 0.8643,
"step": 240
},
{
"epoch": 0.125,
"grad_norm": 52.0,
"learning_rate": 1.9961946980917457e-05,
"loss": 1.3936,
"step": 250
},
{
"epoch": 0.13,
"grad_norm": 28.125,
"learning_rate": 1.9945218953682736e-05,
"loss": 0.8059,
"step": 260
},
{
"epoch": 0.135,
"grad_norm": 12.125,
"learning_rate": 1.9925461516413224e-05,
"loss": 0.8197,
"step": 270
},
{
"epoch": 0.14,
"grad_norm": 30.5,
"learning_rate": 1.9902680687415704e-05,
"loss": 1.3477,
"step": 280
},
{
"epoch": 0.145,
"grad_norm": 24.5,
"learning_rate": 1.9876883405951378e-05,
"loss": 1.0397,
"step": 290
},
{
"epoch": 0.15,
"grad_norm": 7.15625,
"learning_rate": 1.9848077530122083e-05,
"loss": 0.8006,
"step": 300
},
{
"epoch": 0.155,
"grad_norm": 87.0,
"learning_rate": 1.9816271834476642e-05,
"loss": 1.0712,
"step": 310
},
{
"epoch": 0.16,
"grad_norm": 6.375,
"learning_rate": 1.9781476007338058e-05,
"loss": 0.6074,
"step": 320
},
{
"epoch": 0.165,
"grad_norm": 74.0,
"learning_rate": 1.9743700647852356e-05,
"loss": 0.8439,
"step": 330
},
{
"epoch": 0.17,
"grad_norm": 7.59375,
"learning_rate": 1.9702957262759964e-05,
"loss": 0.6888,
"step": 340
},
{
"epoch": 0.175,
"grad_norm": 59.25,
"learning_rate": 1.9659258262890683e-05,
"loss": 0.7408,
"step": 350
},
{
"epoch": 0.18,
"grad_norm": 24.0,
"learning_rate": 1.961261695938319e-05,
"loss": 0.6303,
"step": 360
},
{
"epoch": 0.185,
"grad_norm": 7.625,
"learning_rate": 1.9563047559630356e-05,
"loss": 2.4628,
"step": 370
},
{
"epoch": 0.19,
"grad_norm": 20.125,
"learning_rate": 1.9510565162951538e-05,
"loss": 0.7479,
"step": 380
},
{
"epoch": 0.195,
"grad_norm": 9.0,
"learning_rate": 1.945518575599317e-05,
"loss": 0.6927,
"step": 390
},
{
"epoch": 0.2,
"grad_norm": 11.5625,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.5542,
"step": 400
},
{
"epoch": 0.205,
"grad_norm": 6.9375,
"learning_rate": 1.9335804264972018e-05,
"loss": 1.097,
"step": 410
},
{
"epoch": 0.21,
"grad_norm": 118.5,
"learning_rate": 1.9271838545667876e-05,
"loss": 1.0457,
"step": 420
},
{
"epoch": 0.215,
"grad_norm": 9.0,
"learning_rate": 1.9205048534524405e-05,
"loss": 0.9044,
"step": 430
},
{
"epoch": 0.22,
"grad_norm": 8.3125,
"learning_rate": 1.913545457642601e-05,
"loss": 1.1971,
"step": 440
},
{
"epoch": 0.225,
"grad_norm": 10.625,
"learning_rate": 1.9063077870366504e-05,
"loss": 0.9662,
"step": 450
},
{
"epoch": 0.23,
"grad_norm": 68.5,
"learning_rate": 1.8987940462991673e-05,
"loss": 0.5489,
"step": 460
},
{
"epoch": 0.235,
"grad_norm": 21.625,
"learning_rate": 1.891006524188368e-05,
"loss": 0.3805,
"step": 470
},
{
"epoch": 0.24,
"grad_norm": 22.125,
"learning_rate": 1.8829475928589272e-05,
"loss": 0.4361,
"step": 480
},
{
"epoch": 0.245,
"grad_norm": 8.6875,
"learning_rate": 1.874619707139396e-05,
"loss": 0.4573,
"step": 490
},
{
"epoch": 0.25,
"grad_norm": 85.0,
"learning_rate": 1.866025403784439e-05,
"loss": 1.7953,
"step": 500
},
{
"epoch": 0.255,
"grad_norm": 5.59375,
"learning_rate": 1.8571673007021124e-05,
"loss": 0.319,
"step": 510
},
{
"epoch": 0.26,
"grad_norm": 5.875,
"learning_rate": 1.848048096156426e-05,
"loss": 0.778,
"step": 520
},
{
"epoch": 0.265,
"grad_norm": 85.0,
"learning_rate": 1.8386705679454243e-05,
"loss": 1.6186,
"step": 530
},
{
"epoch": 0.27,
"grad_norm": 7.875,
"learning_rate": 1.8290375725550417e-05,
"loss": 1.8343,
"step": 540
},
{
"epoch": 0.275,
"grad_norm": 7.4375,
"learning_rate": 1.819152044288992e-05,
"loss": 0.4746,
"step": 550
},
{
"epoch": 0.28,
"grad_norm": 5.8125,
"learning_rate": 1.8090169943749477e-05,
"loss": 1.074,
"step": 560
},
{
"epoch": 0.285,
"grad_norm": 5.15625,
"learning_rate": 1.798635510047293e-05,
"loss": 0.2415,
"step": 570
},
{
"epoch": 0.29,
"grad_norm": 14.25,
"learning_rate": 1.788010753606722e-05,
"loss": 0.2946,
"step": 580
},
{
"epoch": 0.295,
"grad_norm": 12.125,
"learning_rate": 1.777145961456971e-05,
"loss": 0.3634,
"step": 590
},
{
"epoch": 0.3,
"grad_norm": 5.3125,
"learning_rate": 1.766044443118978e-05,
"loss": 0.7839,
"step": 600
},
{
"epoch": 0.305,
"grad_norm": 5.96875,
"learning_rate": 1.7547095802227723e-05,
"loss": 0.502,
"step": 610
},
{
"epoch": 0.31,
"grad_norm": 91.0,
"learning_rate": 1.7431448254773943e-05,
"loss": 0.3595,
"step": 620
},
{
"epoch": 0.315,
"grad_norm": 26.875,
"learning_rate": 1.7313537016191706e-05,
"loss": 0.5093,
"step": 630
},
{
"epoch": 0.32,
"grad_norm": 4.8125,
"learning_rate": 1.7193398003386514e-05,
"loss": 0.2864,
"step": 640
},
{
"epoch": 0.325,
"grad_norm": 6.0,
"learning_rate": 1.7071067811865477e-05,
"loss": 0.4504,
"step": 650
},
{
"epoch": 0.33,
"grad_norm": 6.3125,
"learning_rate": 1.6946583704589973e-05,
"loss": 0.2695,
"step": 660
},
{
"epoch": 0.335,
"grad_norm": 7.21875,
"learning_rate": 1.6819983600624986e-05,
"loss": 0.479,
"step": 670
},
{
"epoch": 0.34,
"grad_norm": 106.0,
"learning_rate": 1.6691306063588583e-05,
"loss": 0.7245,
"step": 680
},
{
"epoch": 0.345,
"grad_norm": 5.90625,
"learning_rate": 1.6560590289905074e-05,
"loss": 0.4877,
"step": 690
},
{
"epoch": 0.35,
"grad_norm": 5.78125,
"learning_rate": 1.6427876096865394e-05,
"loss": 0.571,
"step": 700
},
{
"epoch": 0.355,
"grad_norm": 11.8125,
"learning_rate": 1.6293203910498375e-05,
"loss": 0.2269,
"step": 710
},
{
"epoch": 0.36,
"grad_norm": 6.75,
"learning_rate": 1.6156614753256583e-05,
"loss": 0.575,
"step": 720
},
{
"epoch": 0.365,
"grad_norm": 3.875,
"learning_rate": 1.6018150231520486e-05,
"loss": 0.1943,
"step": 730
},
{
"epoch": 0.37,
"grad_norm": 4.15625,
"learning_rate": 1.5877852522924733e-05,
"loss": 0.2041,
"step": 740
},
{
"epoch": 0.375,
"grad_norm": 3.515625,
"learning_rate": 1.573576436351046e-05,
"loss": 0.9804,
"step": 750
},
{
"epoch": 0.38,
"grad_norm": 3.296875,
"learning_rate": 1.5591929034707468e-05,
"loss": 1.1265,
"step": 760
},
{
"epoch": 0.385,
"grad_norm": 5.09375,
"learning_rate": 1.5446390350150272e-05,
"loss": 0.752,
"step": 770
},
{
"epoch": 0.39,
"grad_norm": 4.40625,
"learning_rate": 1.529919264233205e-05,
"loss": 0.1821,
"step": 780
},
{
"epoch": 0.395,
"grad_norm": 4.09375,
"learning_rate": 1.5150380749100545e-05,
"loss": 0.1494,
"step": 790
},
{
"epoch": 0.4,
"grad_norm": 3.96875,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.2066,
"step": 800
},
{
"epoch": 0.405,
"grad_norm": 6.0,
"learning_rate": 1.4848096202463373e-05,
"loss": 0.1645,
"step": 810
},
{
"epoch": 0.41,
"grad_norm": 11.4375,
"learning_rate": 1.469471562785891e-05,
"loss": 0.1673,
"step": 820
},
{
"epoch": 0.415,
"grad_norm": 9.625,
"learning_rate": 1.4539904997395468e-05,
"loss": 0.7342,
"step": 830
},
{
"epoch": 0.42,
"grad_norm": 2.375,
"learning_rate": 1.4383711467890776e-05,
"loss": 0.4579,
"step": 840
},
{
"epoch": 0.425,
"grad_norm": 5.375,
"learning_rate": 1.4226182617406996e-05,
"loss": 0.3541,
"step": 850
},
{
"epoch": 0.43,
"grad_norm": 2.078125,
"learning_rate": 1.4067366430758004e-05,
"loss": 0.1596,
"step": 860
},
{
"epoch": 0.435,
"grad_norm": 3.21875,
"learning_rate": 1.3907311284892737e-05,
"loss": 0.1419,
"step": 870
},
{
"epoch": 0.44,
"grad_norm": 3.8125,
"learning_rate": 1.3746065934159123e-05,
"loss": 0.1184,
"step": 880
},
{
"epoch": 0.445,
"grad_norm": 37.75,
"learning_rate": 1.3583679495453e-05,
"loss": 0.4206,
"step": 890
},
{
"epoch": 0.45,
"grad_norm": 67.0,
"learning_rate": 1.342020143325669e-05,
"loss": 1.6792,
"step": 900
},
{
"epoch": 0.455,
"grad_norm": 4.375,
"learning_rate": 1.3255681544571568e-05,
"loss": 0.4549,
"step": 910
},
{
"epoch": 0.46,
"grad_norm": 10.625,
"learning_rate": 1.3090169943749475e-05,
"loss": 0.1843,
"step": 920
},
{
"epoch": 0.465,
"grad_norm": 3.796875,
"learning_rate": 1.2923717047227368e-05,
"loss": 0.1655,
"step": 930
},
{
"epoch": 0.47,
"grad_norm": 2.296875,
"learning_rate": 1.2756373558169992e-05,
"loss": 0.1113,
"step": 940
},
{
"epoch": 0.475,
"grad_norm": 2.84375,
"learning_rate": 1.2588190451025209e-05,
"loss": 0.1282,
"step": 950
},
{
"epoch": 0.48,
"grad_norm": 1.8203125,
"learning_rate": 1.2419218955996677e-05,
"loss": 0.1195,
"step": 960
},
{
"epoch": 0.485,
"grad_norm": 1.6015625,
"learning_rate": 1.2249510543438652e-05,
"loss": 0.1383,
"step": 970
},
{
"epoch": 0.49,
"grad_norm": 2.875,
"learning_rate": 1.2079116908177592e-05,
"loss": 0.1135,
"step": 980
},
{
"epoch": 0.495,
"grad_norm": 2.359375,
"learning_rate": 1.190808995376545e-05,
"loss": 0.1054,
"step": 990
},
{
"epoch": 0.5,
"grad_norm": 1.671875,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.0947,
"step": 1000
},
{
"epoch": 0.505,
"grad_norm": 1.9140625,
"learning_rate": 1.156434465040231e-05,
"loss": 0.0942,
"step": 1010
},
{
"epoch": 0.51,
"grad_norm": 1.703125,
"learning_rate": 1.1391731009600655e-05,
"loss": 0.088,
"step": 1020
},
{
"epoch": 0.515,
"grad_norm": 2.671875,
"learning_rate": 1.1218693434051475e-05,
"loss": 0.0831,
"step": 1030
},
{
"epoch": 0.52,
"grad_norm": 76.5,
"learning_rate": 1.1045284632676535e-05,
"loss": 0.9448,
"step": 1040
},
{
"epoch": 0.525,
"grad_norm": 2.0625,
"learning_rate": 1.0871557427476585e-05,
"loss": 0.0911,
"step": 1050
},
{
"epoch": 0.53,
"grad_norm": 4.0,
"learning_rate": 1.0697564737441254e-05,
"loss": 0.0968,
"step": 1060
},
{
"epoch": 0.535,
"grad_norm": 2.796875,
"learning_rate": 1.0523359562429441e-05,
"loss": 0.1019,
"step": 1070
},
{
"epoch": 0.54,
"grad_norm": 2.125,
"learning_rate": 1.0348994967025012e-05,
"loss": 0.266,
"step": 1080
},
{
"epoch": 0.545,
"grad_norm": 1.46875,
"learning_rate": 1.0174524064372837e-05,
"loss": 0.095,
"step": 1090
},
{
"epoch": 0.55,
"grad_norm": 1.2109375,
"learning_rate": 1e-05,
"loss": 0.1129,
"step": 1100
},
{
"epoch": 0.555,
"grad_norm": 2.25,
"learning_rate": 9.825475935627165e-06,
"loss": 0.0981,
"step": 1110
},
{
"epoch": 0.56,
"grad_norm": 2.234375,
"learning_rate": 9.651005032974994e-06,
"loss": 0.0866,
"step": 1120
},
{
"epoch": 0.565,
"grad_norm": 1.703125,
"learning_rate": 9.476640437570562e-06,
"loss": 0.0988,
"step": 1130
},
{
"epoch": 0.57,
"grad_norm": 1.5703125,
"learning_rate": 9.302435262558748e-06,
"loss": 0.0764,
"step": 1140
},
{
"epoch": 0.575,
"grad_norm": 1.59375,
"learning_rate": 9.128442572523418e-06,
"loss": 0.0844,
"step": 1150
},
{
"epoch": 0.58,
"grad_norm": 1.703125,
"learning_rate": 8.954715367323468e-06,
"loss": 0.0742,
"step": 1160
},
{
"epoch": 0.585,
"grad_norm": 2.0,
"learning_rate": 8.781306565948528e-06,
"loss": 0.0713,
"step": 1170
},
{
"epoch": 0.59,
"grad_norm": 1.3359375,
"learning_rate": 8.60826899039935e-06,
"loss": 0.0711,
"step": 1180
},
{
"epoch": 0.595,
"grad_norm": 1.53125,
"learning_rate": 8.43565534959769e-06,
"loss": 0.0688,
"step": 1190
},
{
"epoch": 0.6,
"grad_norm": 1.4296875,
"learning_rate": 8.263518223330698e-06,
"loss": 0.0661,
"step": 1200
},
{
"epoch": 0.605,
"grad_norm": 2.8125,
"learning_rate": 8.091910046234552e-06,
"loss": 0.0674,
"step": 1210
},
{
"epoch": 0.61,
"grad_norm": 1.828125,
"learning_rate": 7.92088309182241e-06,
"loss": 0.0646,
"step": 1220
},
{
"epoch": 0.615,
"grad_norm": 1.5390625,
"learning_rate": 7.750489456561351e-06,
"loss": 0.0703,
"step": 1230
},
{
"epoch": 0.62,
"grad_norm": 1.7109375,
"learning_rate": 7.580781044003324e-06,
"loss": 0.0661,
"step": 1240
},
{
"epoch": 0.625,
"grad_norm": 1.3984375,
"learning_rate": 7.411809548974792e-06,
"loss": 0.066,
"step": 1250
},
{
"epoch": 0.63,
"grad_norm": 1.59375,
"learning_rate": 7.243626441830009e-06,
"loss": 0.0674,
"step": 1260
},
{
"epoch": 0.635,
"grad_norm": 2.109375,
"learning_rate": 7.076282952772634e-06,
"loss": 0.0687,
"step": 1270
},
{
"epoch": 0.64,
"grad_norm": 1.25,
"learning_rate": 6.909830056250527e-06,
"loss": 0.0682,
"step": 1280
},
{
"epoch": 0.645,
"grad_norm": 1.53125,
"learning_rate": 6.744318455428436e-06,
"loss": 0.0629,
"step": 1290
},
{
"epoch": 0.65,
"grad_norm": 1.03125,
"learning_rate": 6.579798566743314e-06,
"loss": 0.0633,
"step": 1300
},
{
"epoch": 0.655,
"grad_norm": 1.1171875,
"learning_rate": 6.4163205045469975e-06,
"loss": 0.1746,
"step": 1310
},
{
"epoch": 1.00325,
"grad_norm": 0.90234375,
"learning_rate": 6.25393406584088e-06,
"loss": 0.0615,
"step": 1320
},
{
"epoch": 1.00825,
"grad_norm": 1.984375,
"learning_rate": 6.092688715107265e-06,
"loss": 0.054,
"step": 1330
},
{
"epoch": 1.01325,
"grad_norm": 1.15625,
"learning_rate": 5.932633569242e-06,
"loss": 0.0515,
"step": 1340
},
{
"epoch": 1.01825,
"grad_norm": 1.3515625,
"learning_rate": 5.773817382593008e-06,
"loss": 0.0585,
"step": 1350
},
{
"epoch": 1.02325,
"grad_norm": 0.953125,
"learning_rate": 5.616288532109225e-06,
"loss": 0.0638,
"step": 1360
},
{
"epoch": 1.02825,
"grad_norm": 0.9453125,
"learning_rate": 5.460095002604533e-06,
"loss": 0.0591,
"step": 1370
},
{
"epoch": 1.03325,
"grad_norm": 1.15625,
"learning_rate": 5.305284372141095e-06,
"loss": 0.056,
"step": 1380
},
{
"epoch": 1.03825,
"grad_norm": 61.25,
"learning_rate": 5.151903797536631e-06,
"loss": 0.075,
"step": 1390
},
{
"epoch": 1.04325,
"grad_norm": 0.92578125,
"learning_rate": 5.000000000000003e-06,
"loss": 0.0569,
"step": 1400
},
{
"epoch": 1.04825,
"grad_norm": 0.859375,
"learning_rate": 4.849619250899458e-06,
"loss": 0.0593,
"step": 1410
},
{
"epoch": 1.05325,
"grad_norm": 1.3515625,
"learning_rate": 4.700807357667953e-06,
"loss": 0.0586,
"step": 1420
},
{
"epoch": 1.05825,
"grad_norm": 0.86328125,
"learning_rate": 4.5536096498497295e-06,
"loss": 0.0553,
"step": 1430
},
{
"epoch": 1.06325,
"grad_norm": 0.85546875,
"learning_rate": 4.408070965292534e-06,
"loss": 0.0591,
"step": 1440
},
{
"epoch": 1.06825,
"grad_norm": 0.75,
"learning_rate": 4.264235636489542e-06,
"loss": 0.0518,
"step": 1450
},
{
"epoch": 1.07325,
"grad_norm": 1.171875,
"learning_rate": 4.12214747707527e-06,
"loss": 0.0552,
"step": 1460
},
{
"epoch": 1.07825,
"grad_norm": 0.76171875,
"learning_rate": 3.981849768479516e-06,
"loss": 0.0503,
"step": 1470
},
{
"epoch": 1.08325,
"grad_norm": 0.74609375,
"learning_rate": 3.8433852467434175e-06,
"loss": 0.0493,
"step": 1480
},
{
"epoch": 1.08825,
"grad_norm": 0.9375,
"learning_rate": 3.7067960895016277e-06,
"loss": 0.0511,
"step": 1490
},
{
"epoch": 1.09325,
"grad_norm": 0.859375,
"learning_rate": 3.5721239031346067e-06,
"loss": 0.052,
"step": 1500
},
{
"epoch": 1.09825,
"grad_norm": 0.9609375,
"learning_rate": 3.4394097100949286e-06,
"loss": 0.0508,
"step": 1510
},
{
"epoch": 1.10325,
"grad_norm": 0.8984375,
"learning_rate": 3.308693936411421e-06,
"loss": 0.0518,
"step": 1520
},
{
"epoch": 1.10825,
"grad_norm": 1.0,
"learning_rate": 3.1800163993750166e-06,
"loss": 0.054,
"step": 1530
},
{
"epoch": 1.11325,
"grad_norm": 0.86328125,
"learning_rate": 3.0534162954100264e-06,
"loss": 0.0524,
"step": 1540
},
{
"epoch": 1.11825,
"grad_norm": 0.73828125,
"learning_rate": 2.9289321881345257e-06,
"loss": 0.0544,
"step": 1550
},
{
"epoch": 1.12325,
"grad_norm": 0.8359375,
"learning_rate": 2.8066019966134907e-06,
"loss": 0.0589,
"step": 1560
},
{
"epoch": 1.12825,
"grad_norm": 0.76171875,
"learning_rate": 2.6864629838082957e-06,
"loss": 0.0529,
"step": 1570
},
{
"epoch": 1.13325,
"grad_norm": 0.890625,
"learning_rate": 2.5685517452260566e-06,
"loss": 0.0531,
"step": 1580
},
{
"epoch": 1.13825,
"grad_norm": 1.140625,
"learning_rate": 2.45290419777228e-06,
"loss": 0.0553,
"step": 1590
},
{
"epoch": 1.14325,
"grad_norm": 1.0078125,
"learning_rate": 2.339555568810221e-06,
"loss": 0.0542,
"step": 1600
},
{
"epoch": 1.14825,
"grad_norm": 0.84375,
"learning_rate": 2.2285403854302912e-06,
"loss": 0.0528,
"step": 1610
},
{
"epoch": 1.1532499999999999,
"grad_norm": 0.83203125,
"learning_rate": 2.119892463932781e-06,
"loss": 0.0495,
"step": 1620
},
{
"epoch": 1.15825,
"grad_norm": 1.2734375,
"learning_rate": 2.013644899527074e-06,
"loss": 0.0529,
"step": 1630
},
{
"epoch": 1.1632500000000001,
"grad_norm": 2.09375,
"learning_rate": 1.9098300562505266e-06,
"loss": 0.0835,
"step": 1640
},
{
"epoch": 1.16825,
"grad_norm": 0.76171875,
"learning_rate": 1.808479557110081e-06,
"loss": 0.0514,
"step": 1650
},
{
"epoch": 1.17325,
"grad_norm": 1.125,
"learning_rate": 1.709624274449584e-06,
"loss": 0.0484,
"step": 1660
},
{
"epoch": 1.17825,
"grad_norm": 0.70703125,
"learning_rate": 1.6132943205457607e-06,
"loss": 0.0496,
"step": 1670
},
{
"epoch": 1.18325,
"grad_norm": 0.9453125,
"learning_rate": 1.5195190384357405e-06,
"loss": 0.0501,
"step": 1680
},
{
"epoch": 1.18825,
"grad_norm": 1.203125,
"learning_rate": 1.4283269929788779e-06,
"loss": 0.0515,
"step": 1690
},
{
"epoch": 1.19325,
"grad_norm": 0.8203125,
"learning_rate": 1.339745962155613e-06,
"loss": 0.0528,
"step": 1700
},
{
"epoch": 1.19825,
"grad_norm": 1.0625,
"learning_rate": 1.2538029286060428e-06,
"loss": 0.0499,
"step": 1710
},
{
"epoch": 1.20325,
"grad_norm": 2.21875,
"learning_rate": 1.1705240714107301e-06,
"loss": 0.0546,
"step": 1720
},
{
"epoch": 1.20825,
"grad_norm": 0.859375,
"learning_rate": 1.0899347581163222e-06,
"loss": 0.0541,
"step": 1730
},
{
"epoch": 1.21325,
"grad_norm": 0.82421875,
"learning_rate": 1.012059537008332e-06,
"loss": 0.0523,
"step": 1740
},
{
"epoch": 1.21825,
"grad_norm": 0.9375,
"learning_rate": 9.369221296335007e-07,
"loss": 0.0492,
"step": 1750
},
{
"epoch": 1.22325,
"grad_norm": 0.859375,
"learning_rate": 8.645454235739903e-07,
"loss": 0.0544,
"step": 1760
},
{
"epoch": 1.22825,
"grad_norm": 0.7265625,
"learning_rate": 7.949514654755963e-07,
"loss": 0.0542,
"step": 1770
},
{
"epoch": 1.23325,
"grad_norm": 0.77734375,
"learning_rate": 7.281614543321269e-07,
"loss": 0.0522,
"step": 1780
},
{
"epoch": 1.23825,
"grad_norm": 1.0625,
"learning_rate": 6.641957350279838e-07,
"loss": 0.0492,
"step": 1790
},
{
"epoch": 1.24325,
"grad_norm": 0.921875,
"learning_rate": 6.030737921409169e-07,
"loss": 0.05,
"step": 1800
},
{
"epoch": 1.24825,
"grad_norm": 0.90625,
"learning_rate": 5.448142440068316e-07,
"loss": 0.0484,
"step": 1810
},
{
"epoch": 1.25325,
"grad_norm": 1.1171875,
"learning_rate": 4.894348370484648e-07,
"loss": 0.0481,
"step": 1820
},
{
"epoch": 1.2582499999999999,
"grad_norm": 0.765625,
"learning_rate": 4.3695244036964567e-07,
"loss": 0.0492,
"step": 1830
},
{
"epoch": 1.26325,
"grad_norm": 0.73828125,
"learning_rate": 3.8738304061681107e-07,
"loss": 0.0492,
"step": 1840
},
{
"epoch": 1.26825,
"grad_norm": 1.7109375,
"learning_rate": 3.4074173710931804e-07,
"loss": 0.0501,
"step": 1850
},
{
"epoch": 1.27325,
"grad_norm": 0.80859375,
"learning_rate": 2.970427372400353e-07,
"loss": 0.0488,
"step": 1860
},
{
"epoch": 1.2782499999999999,
"grad_norm": 0.95703125,
"learning_rate": 2.5629935214764866e-07,
"loss": 0.0499,
"step": 1870
},
{
"epoch": 1.28325,
"grad_norm": 0.78125,
"learning_rate": 2.1852399266194312e-07,
"loss": 0.0503,
"step": 1880
},
{
"epoch": 1.2882500000000001,
"grad_norm": 0.83203125,
"learning_rate": 1.8372816552336025e-07,
"loss": 0.0515,
"step": 1890
},
{
"epoch": 1.29325,
"grad_norm": 0.76171875,
"learning_rate": 1.519224698779198e-07,
"loss": 0.0546,
"step": 1900
},
{
"epoch": 1.29825,
"grad_norm": 0.796875,
"learning_rate": 1.231165940486234e-07,
"loss": 0.054,
"step": 1910
},
{
"epoch": 1.30325,
"grad_norm": 0.94921875,
"learning_rate": 9.731931258429638e-08,
"loss": 0.0537,
"step": 1920
},
{
"epoch": 1.3082500000000001,
"grad_norm": 0.875,
"learning_rate": 7.453848358678018e-08,
"loss": 0.0515,
"step": 1930
},
{
"epoch": 1.31325,
"grad_norm": 0.79296875,
"learning_rate": 5.4781046317267103e-08,
"loss": 0.0538,
"step": 1940
},
{
"epoch": 1.31825,
"grad_norm": 0.98046875,
"learning_rate": 3.805301908254455e-08,
"loss": 0.0497,
"step": 1950
},
{
"epoch": 1.32325,
"grad_norm": 0.8046875,
"learning_rate": 2.4359497401758026e-08,
"loss": 0.0531,
"step": 1960
},
{
"epoch": 1.32825,
"grad_norm": 0.72265625,
"learning_rate": 1.370465245426167e-08,
"loss": 0.0506,
"step": 1970
},
{
"epoch": 1.33325,
"grad_norm": 2.0625,
"learning_rate": 6.091729809042379e-09,
"loss": 0.0499,
"step": 1980
},
{
"epoch": 1.33825,
"grad_norm": 0.69140625,
"learning_rate": 1.5230484360873043e-09,
"loss": 0.0466,
"step": 1990
},
{
"epoch": 1.34325,
"grad_norm": 0.86328125,
"learning_rate": 0.0,
"loss": 0.0484,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.9130140524439142e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}