{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 14.993576017130621,
"eval_steps": 500,
"global_step": 1740,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08565310492505353,
"grad_norm": 5.411635875701904,
"learning_rate": 9.999339889379647e-06,
"loss": 1.5609,
"num_input_tokens_seen": 255328,
"step": 10
},
{
"epoch": 0.17130620985010706,
"grad_norm": 0.7214002013206482,
"learning_rate": 9.997058249278764e-06,
"loss": 0.2055,
"num_input_tokens_seen": 510640,
"step": 20
},
{
"epoch": 0.2569593147751606,
"grad_norm": 0.47501006722450256,
"learning_rate": 9.993147673772869e-06,
"loss": 0.184,
"num_input_tokens_seen": 763808,
"step": 30
},
{
"epoch": 0.3426124197002141,
"grad_norm": 0.20641829073429108,
"learning_rate": 9.987609437626955e-06,
"loss": 0.1767,
"num_input_tokens_seen": 1017472,
"step": 40
},
{
"epoch": 0.4282655246252677,
"grad_norm": 0.49966660141944885,
"learning_rate": 9.98044534618898e-06,
"loss": 0.1821,
"num_input_tokens_seen": 1273488,
"step": 50
},
{
"epoch": 0.5139186295503212,
"grad_norm": 0.8994255065917969,
"learning_rate": 9.971657734801385e-06,
"loss": 0.1819,
"num_input_tokens_seen": 1527744,
"step": 60
},
{
"epoch": 0.5995717344753747,
"grad_norm": 1.7688418626785278,
"learning_rate": 9.961249468039806e-06,
"loss": 0.1797,
"num_input_tokens_seen": 1785520,
"step": 70
},
{
"epoch": 0.6852248394004282,
"grad_norm": 0.7211973071098328,
"learning_rate": 9.949223938779286e-06,
"loss": 0.1765,
"num_input_tokens_seen": 2037648,
"step": 80
},
{
"epoch": 0.7708779443254818,
"grad_norm": 0.94338458776474,
"learning_rate": 9.935585067088276e-06,
"loss": 0.1766,
"num_input_tokens_seen": 2292464,
"step": 90
},
{
"epoch": 0.8565310492505354,
"grad_norm": 0.6227909326553345,
"learning_rate": 9.920337298950767e-06,
"loss": 0.1714,
"num_input_tokens_seen": 2547872,
"step": 100
},
{
"epoch": 0.9421841541755889,
"grad_norm": 0.5941164493560791,
"learning_rate": 9.903485604816993e-06,
"loss": 0.1728,
"num_input_tokens_seen": 2801536,
"step": 110
},
{
"epoch": 1.0342612419700214,
"grad_norm": 0.4057783782482147,
"learning_rate": 9.885035477983184e-06,
"loss": 0.1868,
"num_input_tokens_seen": 3074416,
"step": 120
},
{
"epoch": 1.119914346895075,
"grad_norm": 0.5549430251121521,
"learning_rate": 9.864992932800845e-06,
"loss": 0.1679,
"num_input_tokens_seen": 3327568,
"step": 130
},
{
"epoch": 1.2055674518201285,
"grad_norm": 0.41472136974334717,
"learning_rate": 9.843364502716225e-06,
"loss": 0.1671,
"num_input_tokens_seen": 3582240,
"step": 140
},
{
"epoch": 1.291220556745182,
"grad_norm": 0.5156757235527039,
"learning_rate": 9.820157238140535e-06,
"loss": 0.1682,
"num_input_tokens_seen": 3838160,
"step": 150
},
{
"epoch": 1.3768736616702355,
"grad_norm": 0.5046593546867371,
"learning_rate": 9.795378704151675e-06,
"loss": 0.1651,
"num_input_tokens_seen": 4092304,
"step": 160
},
{
"epoch": 1.462526766595289,
"grad_norm": 0.5588434338569641,
"learning_rate": 9.76903697802817e-06,
"loss": 0.1649,
"num_input_tokens_seen": 4346640,
"step": 170
},
{
"epoch": 1.5481798715203428,
"grad_norm": 0.46262454986572266,
"learning_rate": 9.741140646616161e-06,
"loss": 0.1669,
"num_input_tokens_seen": 4602192,
"step": 180
},
{
"epoch": 1.633832976445396,
"grad_norm": 0.45427972078323364,
"learning_rate": 9.711698803530253e-06,
"loss": 0.1674,
"num_input_tokens_seen": 4858240,
"step": 190
},
{
"epoch": 1.7194860813704498,
"grad_norm": 0.4514879882335663,
"learning_rate": 9.68072104618921e-06,
"loss": 0.1641,
"num_input_tokens_seen": 5113408,
"step": 200
},
{
"epoch": 1.805139186295503,
"grad_norm": 0.7933849692344666,
"learning_rate": 9.648217472687385e-06,
"loss": 0.1614,
"num_input_tokens_seen": 5368352,
"step": 210
},
{
"epoch": 1.8907922912205568,
"grad_norm": 0.6207934021949768,
"learning_rate": 9.614198678502965e-06,
"loss": 0.163,
"num_input_tokens_seen": 5622128,
"step": 220
},
{
"epoch": 1.9764453961456103,
"grad_norm": 0.8193040490150452,
"learning_rate": 9.57867575304406e-06,
"loss": 0.1589,
"num_input_tokens_seen": 5876816,
"step": 230
},
{
"epoch": 2.068522483940043,
"grad_norm": 1.0469202995300293,
"learning_rate": 9.541660276033795e-06,
"loss": 0.1755,
"num_input_tokens_seen": 6145392,
"step": 240
},
{
"epoch": 2.154175588865096,
"grad_norm": 0.9274189472198486,
"learning_rate": 9.503164313735566e-06,
"loss": 0.1595,
"num_input_tokens_seen": 6399504,
"step": 250
},
{
"epoch": 2.23982869379015,
"grad_norm": 0.6875982880592346,
"learning_rate": 9.46320041501969e-06,
"loss": 0.1563,
"num_input_tokens_seen": 6654160,
"step": 260
},
{
"epoch": 2.325481798715203,
"grad_norm": 0.5835751295089722,
"learning_rate": 9.421781607272741e-06,
"loss": 0.1554,
"num_input_tokens_seen": 6910752,
"step": 270
},
{
"epoch": 2.411134903640257,
"grad_norm": 0.6475698351860046,
"learning_rate": 9.378921392150893e-06,
"loss": 0.1579,
"num_input_tokens_seen": 7166960,
"step": 280
},
{
"epoch": 2.4967880085653107,
"grad_norm": 0.6029316782951355,
"learning_rate": 9.33463374117867e-06,
"loss": 0.1577,
"num_input_tokens_seen": 7420288,
"step": 290
},
{
"epoch": 2.582441113490364,
"grad_norm": 0.6444355845451355,
"learning_rate": 9.288933091194524e-06,
"loss": 0.1564,
"num_input_tokens_seen": 7675184,
"step": 300
},
{
"epoch": 2.6680942184154177,
"grad_norm": 0.5541071891784668,
"learning_rate": 9.241834339644726e-06,
"loss": 0.1528,
"num_input_tokens_seen": 7926976,
"step": 310
},
{
"epoch": 2.753747323340471,
"grad_norm": 0.6703725457191467,
"learning_rate": 9.193352839727122e-06,
"loss": 0.1549,
"num_input_tokens_seen": 8184992,
"step": 320
},
{
"epoch": 2.8394004282655247,
"grad_norm": 0.6515584588050842,
"learning_rate": 9.143504395386302e-06,
"loss": 0.157,
"num_input_tokens_seen": 8439712,
"step": 330
},
{
"epoch": 2.925053533190578,
"grad_norm": 0.5527693629264832,
"learning_rate": 9.09230525616186e-06,
"loss": 0.157,
"num_input_tokens_seen": 8694080,
"step": 340
},
{
"epoch": 3.017130620985011,
"grad_norm": 0.6593677401542664,
"learning_rate": 9.039772111891383e-06,
"loss": 0.1672,
"num_input_tokens_seen": 8965488,
"step": 350
},
{
"epoch": 3.102783725910064,
"grad_norm": 0.5042828917503357,
"learning_rate": 8.985922087269916e-06,
"loss": 0.1483,
"num_input_tokens_seen": 9220480,
"step": 360
},
{
"epoch": 3.188436830835118,
"grad_norm": 0.4123888611793518,
"learning_rate": 8.930772736267675e-06,
"loss": 0.1532,
"num_input_tokens_seen": 9477024,
"step": 370
},
{
"epoch": 3.274089935760171,
"grad_norm": 0.7851901054382324,
"learning_rate": 8.874342036407815e-06,
"loss": 0.1508,
"num_input_tokens_seen": 9731840,
"step": 380
},
{
"epoch": 3.359743040685225,
"grad_norm": 0.7545840740203857,
"learning_rate": 8.816648382906154e-06,
"loss": 0.1516,
"num_input_tokens_seen": 9986704,
"step": 390
},
{
"epoch": 3.445396145610278,
"grad_norm": 0.7439327239990234,
"learning_rate": 8.757710582674708e-06,
"loss": 0.1506,
"num_input_tokens_seen": 10238720,
"step": 400
},
{
"epoch": 3.531049250535332,
"grad_norm": 0.8343164920806885,
"learning_rate": 8.697547848191037e-06,
"loss": 0.1516,
"num_input_tokens_seen": 10491856,
"step": 410
},
{
"epoch": 3.6167023554603857,
"grad_norm": 0.817565381526947,
"learning_rate": 8.63617979123539e-06,
"loss": 0.1542,
"num_input_tokens_seen": 10744240,
"step": 420
},
{
"epoch": 3.702355460385439,
"grad_norm": 0.5334470272064209,
"learning_rate": 8.573626416497669e-06,
"loss": 0.1446,
"num_input_tokens_seen": 10996768,
"step": 430
},
{
"epoch": 3.7880085653104922,
"grad_norm": 0.9441611766815186,
"learning_rate": 8.509908115056334e-06,
"loss": 0.1515,
"num_input_tokens_seen": 11254560,
"step": 440
},
{
"epoch": 3.873661670235546,
"grad_norm": 0.6177489757537842,
"learning_rate": 8.445045657731329e-06,
"loss": 0.1513,
"num_input_tokens_seen": 11512992,
"step": 450
},
{
"epoch": 3.9593147751605997,
"grad_norm": 0.5743350982666016,
"learning_rate": 8.379060188313244e-06,
"loss": 0.1458,
"num_input_tokens_seen": 11765808,
"step": 460
},
{
"epoch": 4.0513918629550325,
"grad_norm": 0.8525713086128235,
"learning_rate": 8.311973216670888e-06,
"loss": 0.1598,
"num_input_tokens_seen": 12036784,
"step": 470
},
{
"epoch": 4.137044967880086,
"grad_norm": 0.6399952173233032,
"learning_rate": 8.243806611739516e-06,
"loss": 0.1448,
"num_input_tokens_seen": 12290592,
"step": 480
},
{
"epoch": 4.222698072805139,
"grad_norm": 0.657546877861023,
"learning_rate": 8.17458259439202e-06,
"loss": 0.144,
"num_input_tokens_seen": 12542464,
"step": 490
},
{
"epoch": 4.308351177730192,
"grad_norm": 0.6414650678634644,
"learning_rate": 8.104323730195407e-06,
"loss": 0.1406,
"num_input_tokens_seen": 12796848,
"step": 500
},
{
"epoch": 4.394004282655247,
"grad_norm": 0.7480872869491577,
"learning_rate": 8.033052922054882e-06,
"loss": 0.1436,
"num_input_tokens_seen": 13051760,
"step": 510
},
{
"epoch": 4.4796573875803,
"grad_norm": 0.7025752067565918,
"learning_rate": 7.960793402748001e-06,
"loss": 0.147,
"num_input_tokens_seen": 13305808,
"step": 520
},
{
"epoch": 4.565310492505353,
"grad_norm": 0.5708986520767212,
"learning_rate": 7.887568727351262e-06,
"loss": 0.1456,
"num_input_tokens_seen": 13563056,
"step": 530
},
{
"epoch": 4.650963597430406,
"grad_norm": 0.6903087496757507,
"learning_rate": 7.813402765561664e-06,
"loss": 0.143,
"num_input_tokens_seen": 13816992,
"step": 540
},
{
"epoch": 4.736616702355461,
"grad_norm": 0.6083903908729553,
"learning_rate": 7.738319693915673e-06,
"loss": 0.1439,
"num_input_tokens_seen": 14071936,
"step": 550
},
{
"epoch": 4.822269807280514,
"grad_norm": 0.6583831906318665,
"learning_rate": 7.662343987908195e-06,
"loss": 0.147,
"num_input_tokens_seen": 14327440,
"step": 560
},
{
"epoch": 4.907922912205567,
"grad_norm": 0.8827478885650635,
"learning_rate": 7.585500414014077e-06,
"loss": 0.1467,
"num_input_tokens_seen": 14582832,
"step": 570
},
{
"epoch": 4.993576017130621,
"grad_norm": 0.8274891972541809,
"learning_rate": 7.507814021614761e-06,
"loss": 0.1478,
"num_input_tokens_seen": 14839136,
"step": 580
},
{
"epoch": 5.085653104925053,
"grad_norm": 1.3195112943649292,
"learning_rate": 7.429310134832709e-06,
"loss": 0.1517,
"num_input_tokens_seen": 15109264,
"step": 590
},
{
"epoch": 5.1713062098501075,
"grad_norm": 0.7981224656105042,
"learning_rate": 7.35001434427628e-06,
"loss": 0.1396,
"num_input_tokens_seen": 15363824,
"step": 600
},
{
"epoch": 5.256959314775161,
"grad_norm": 0.6522560715675354,
"learning_rate": 7.269952498697734e-06,
"loss": 0.142,
"num_input_tokens_seen": 15618576,
"step": 610
},
{
"epoch": 5.342612419700214,
"grad_norm": 0.7629905343055725,
"learning_rate": 7.189150696567081e-06,
"loss": 0.1384,
"num_input_tokens_seen": 15871056,
"step": 620
},
{
"epoch": 5.428265524625267,
"grad_norm": 0.9554848670959473,
"learning_rate": 7.10763527756453e-06,
"loss": 0.1405,
"num_input_tokens_seen": 16124976,
"step": 630
},
{
"epoch": 5.5139186295503215,
"grad_norm": 0.8175866603851318,
"learning_rate": 7.025432813994315e-06,
"loss": 0.1357,
"num_input_tokens_seen": 16381680,
"step": 640
},
{
"epoch": 5.599571734475375,
"grad_norm": 0.7990790009498596,
"learning_rate": 6.942570102122679e-06,
"loss": 0.1387,
"num_input_tokens_seen": 16638048,
"step": 650
},
{
"epoch": 5.685224839400428,
"grad_norm": 0.9116854667663574,
"learning_rate": 6.859074153442864e-06,
"loss": 0.1414,
"num_input_tokens_seen": 16894688,
"step": 660
},
{
"epoch": 5.770877944325482,
"grad_norm": 0.7633938789367676,
"learning_rate": 6.774972185869928e-06,
"loss": 0.1389,
"num_input_tokens_seen": 17147808,
"step": 670
},
{
"epoch": 5.856531049250536,
"grad_norm": 0.8924551606178284,
"learning_rate": 6.690291614868287e-06,
"loss": 0.1361,
"num_input_tokens_seen": 17403280,
"step": 680
},
{
"epoch": 5.942184154175589,
"grad_norm": 0.8566009998321533,
"learning_rate": 6.60506004451485e-06,
"loss": 0.1356,
"num_input_tokens_seen": 17657888,
"step": 690
},
{
"epoch": 6.034261241970022,
"grad_norm": 0.9057173132896423,
"learning_rate": 6.5193052585006666e-06,
"loss": 0.1483,
"num_input_tokens_seen": 17927520,
"step": 700
},
{
"epoch": 6.119914346895075,
"grad_norm": 0.9895085692405701,
"learning_rate": 6.433055211074042e-06,
"loss": 0.1308,
"num_input_tokens_seen": 18184352,
"step": 710
},
{
"epoch": 6.205567451820128,
"grad_norm": 1.0845868587493896,
"learning_rate": 6.346338017928036e-06,
"loss": 0.1269,
"num_input_tokens_seen": 18437792,
"step": 720
},
{
"epoch": 6.291220556745182,
"grad_norm": 1.021283745765686,
"learning_rate": 6.2591819470353424e-06,
"loss": 0.1301,
"num_input_tokens_seen": 18690144,
"step": 730
},
{
"epoch": 6.376873661670236,
"grad_norm": 1.1350120306015015,
"learning_rate": 6.171615409433525e-06,
"loss": 0.1275,
"num_input_tokens_seen": 18944688,
"step": 740
},
{
"epoch": 6.462526766595289,
"grad_norm": 1.0572874546051025,
"learning_rate": 6.0836669499636255e-06,
"loss": 0.1264,
"num_input_tokens_seen": 19199984,
"step": 750
},
{
"epoch": 6.548179871520342,
"grad_norm": 1.1884225606918335,
"learning_rate": 5.995365237965144e-06,
"loss": 0.1294,
"num_input_tokens_seen": 19452032,
"step": 760
},
{
"epoch": 6.6338329764453965,
"grad_norm": 0.9745492339134216,
"learning_rate": 5.906739057930439e-06,
"loss": 0.1262,
"num_input_tokens_seen": 19707040,
"step": 770
},
{
"epoch": 6.71948608137045,
"grad_norm": 1.090391755104065,
"learning_rate": 5.817817300121592e-06,
"loss": 0.1266,
"num_input_tokens_seen": 19962960,
"step": 780
},
{
"epoch": 6.805139186295503,
"grad_norm": 1.1640676259994507,
"learning_rate": 5.728628951152799e-06,
"loss": 0.1324,
"num_input_tokens_seen": 20219008,
"step": 790
},
{
"epoch": 6.890792291220556,
"grad_norm": 0.9813507199287415,
"learning_rate": 5.639203084541338e-06,
"loss": 0.1338,
"num_input_tokens_seen": 20473664,
"step": 800
},
{
"epoch": 6.9764453961456105,
"grad_norm": 1.11289644241333,
"learning_rate": 5.549568851230219e-06,
"loss": 0.1273,
"num_input_tokens_seen": 20727296,
"step": 810
},
{
"epoch": 7.0685224839400425,
"grad_norm": 1.5624918937683105,
"learning_rate": 5.459755470085595e-06,
"loss": 0.1332,
"num_input_tokens_seen": 20996432,
"step": 820
},
{
"epoch": 7.154175588865097,
"grad_norm": 1.3339862823486328,
"learning_rate": 5.369792218372026e-06,
"loss": 0.1104,
"num_input_tokens_seen": 21252272,
"step": 830
},
{
"epoch": 7.23982869379015,
"grad_norm": 1.5236716270446777,
"learning_rate": 5.2797084222087105e-06,
"loss": 0.1114,
"num_input_tokens_seen": 21508208,
"step": 840
},
{
"epoch": 7.325481798715203,
"grad_norm": 1.4154669046401978,
"learning_rate": 5.189533447009795e-06,
"loss": 0.1134,
"num_input_tokens_seen": 21765536,
"step": 850
},
{
"epoch": 7.4111349036402565,
"grad_norm": 1.5260732173919678,
"learning_rate": 5.099296687911858e-06,
"loss": 0.1102,
"num_input_tokens_seen": 22020160,
"step": 860
},
{
"epoch": 7.496788008565311,
"grad_norm": 1.2989623546600342,
"learning_rate": 5.009027560191732e-06,
"loss": 0.1122,
"num_input_tokens_seen": 22274400,
"step": 870
},
{
"epoch": 7.582441113490364,
"grad_norm": 1.4925442934036255,
"learning_rate": 4.918755489677729e-06,
"loss": 0.1094,
"num_input_tokens_seen": 22526464,
"step": 880
},
{
"epoch": 7.668094218415417,
"grad_norm": 1.3059921264648438,
"learning_rate": 4.828509903157451e-06,
"loss": 0.1128,
"num_input_tokens_seen": 22779664,
"step": 890
},
{
"epoch": 7.7537473233404715,
"grad_norm": 1.6819276809692383,
"learning_rate": 4.738320218785281e-06,
"loss": 0.1146,
"num_input_tokens_seen": 23036160,
"step": 900
},
{
"epoch": 7.839400428265525,
"grad_norm": 1.3909580707550049,
"learning_rate": 4.648215836492682e-06,
"loss": 0.1145,
"num_input_tokens_seen": 23292016,
"step": 910
},
{
"epoch": 7.925053533190578,
"grad_norm": 1.7210851907730103,
"learning_rate": 4.5582261284044385e-06,
"loss": 0.1156,
"num_input_tokens_seen": 23544800,
"step": 920
},
{
"epoch": 8.01713062098501,
"grad_norm": 1.2723944187164307,
"learning_rate": 4.468380429263973e-06,
"loss": 0.1197,
"num_input_tokens_seen": 23816288,
"step": 930
},
{
"epoch": 8.102783725910065,
"grad_norm": 1.9091925621032715,
"learning_rate": 4.378708026870825e-06,
"loss": 0.0916,
"num_input_tokens_seen": 24071488,
"step": 940
},
{
"epoch": 8.188436830835117,
"grad_norm": 1.7839370965957642,
"learning_rate": 4.289238152533465e-06,
"loss": 0.0893,
"num_input_tokens_seen": 24324720,
"step": 950
},
{
"epoch": 8.274089935760172,
"grad_norm": 2.00311541557312,
"learning_rate": 4.199999971540489e-06,
"loss": 0.0889,
"num_input_tokens_seen": 24579648,
"step": 960
},
{
"epoch": 8.359743040685224,
"grad_norm": 2.047337293624878,
"learning_rate": 4.111022573653366e-06,
"loss": 0.0873,
"num_input_tokens_seen": 24833840,
"step": 970
},
{
"epoch": 8.445396145610278,
"grad_norm": 1.9115785360336304,
"learning_rate": 4.0223349636237766e-06,
"loss": 0.0904,
"num_input_tokens_seen": 25089776,
"step": 980
},
{
"epoch": 8.531049250535332,
"grad_norm": 1.8445810079574585,
"learning_rate": 3.933966051738684e-06,
"loss": 0.088,
"num_input_tokens_seen": 25345264,
"step": 990
},
{
"epoch": 8.616702355460385,
"grad_norm": 1.6529115438461304,
"learning_rate": 3.845944644396194e-06,
"loss": 0.0919,
"num_input_tokens_seen": 25598112,
"step": 1000
},
{
"epoch": 8.702355460385439,
"grad_norm": 2.129995346069336,
"learning_rate": 3.758299434715268e-06,
"loss": 0.0906,
"num_input_tokens_seen": 25851728,
"step": 1010
},
{
"epoch": 8.788008565310493,
"grad_norm": 2.1039373874664307,
"learning_rate": 3.6710589931823837e-06,
"loss": 0.0895,
"num_input_tokens_seen": 26104704,
"step": 1020
},
{
"epoch": 8.873661670235546,
"grad_norm": 2.058598518371582,
"learning_rate": 3.584251758338151e-06,
"loss": 0.0923,
"num_input_tokens_seen": 26361680,
"step": 1030
},
{
"epoch": 8.9593147751606,
"grad_norm": 1.8930065631866455,
"learning_rate": 3.4979060275069576e-06,
"loss": 0.0908,
"num_input_tokens_seen": 26617536,
"step": 1040
},
{
"epoch": 9.051391862955033,
"grad_norm": 1.8233646154403687,
"learning_rate": 3.4120499475726266e-06,
"loss": 0.0847,
"num_input_tokens_seen": 26888160,
"step": 1050
},
{
"epoch": 9.137044967880085,
"grad_norm": 2.1758053302764893,
"learning_rate": 3.3267115058031418e-06,
"loss": 0.0657,
"num_input_tokens_seen": 27142528,
"step": 1060
},
{
"epoch": 9.222698072805139,
"grad_norm": 2.0327367782592773,
"learning_rate": 3.2419185207273816e-06,
"loss": 0.0662,
"num_input_tokens_seen": 27394144,
"step": 1070
},
{
"epoch": 9.308351177730193,
"grad_norm": 2.2035434246063232,
"learning_rate": 3.157698633066863e-06,
"loss": 0.0665,
"num_input_tokens_seen": 27649488,
"step": 1080
},
{
"epoch": 9.394004282655246,
"grad_norm": 1.9066494703292847,
"learning_rate": 3.0740792967254606e-06,
"loss": 0.0642,
"num_input_tokens_seen": 27904992,
"step": 1090
},
{
"epoch": 9.4796573875803,
"grad_norm": 2.2175674438476562,
"learning_rate": 2.991087769840001e-06,
"loss": 0.0625,
"num_input_tokens_seen": 28160336,
"step": 1100
},
{
"epoch": 9.565310492505354,
"grad_norm": 2.435115337371826,
"learning_rate": 2.9087511058947014e-06,
"loss": 0.0643,
"num_input_tokens_seen": 28417360,
"step": 1110
},
{
"epoch": 9.650963597430406,
"grad_norm": 2.237015724182129,
"learning_rate": 2.827096144902289e-06,
"loss": 0.0645,
"num_input_tokens_seen": 28670512,
"step": 1120
},
{
"epoch": 9.73661670235546,
"grad_norm": 2.473604202270508,
"learning_rate": 2.7461495046547436e-06,
"loss": 0.068,
"num_input_tokens_seen": 28927232,
"step": 1130
},
{
"epoch": 9.822269807280513,
"grad_norm": 2.220705270767212,
"learning_rate": 2.665937572046432e-06,
"loss": 0.0647,
"num_input_tokens_seen": 29182768,
"step": 1140
},
{
"epoch": 9.907922912205567,
"grad_norm": 2.652024269104004,
"learning_rate": 2.586486494472572e-06,
"loss": 0.0644,
"num_input_tokens_seen": 29437936,
"step": 1150
},
{
"epoch": 9.993576017130621,
"grad_norm": 2.180983304977417,
"learning_rate": 2.5078221713057048e-06,
"loss": 0.0658,
"num_input_tokens_seen": 29690944,
"step": 1160
},
{
"epoch": 10.085653104925054,
"grad_norm": 1.6538355350494385,
"learning_rate": 2.4299702454530605e-06,
"loss": 0.053,
"num_input_tokens_seen": 29964448,
"step": 1170
},
{
"epoch": 10.171306209850107,
"grad_norm": 2.229673147201538,
"learning_rate": 2.3529560949975184e-06,
"loss": 0.0446,
"num_input_tokens_seen": 30215952,
"step": 1180
},
{
"epoch": 10.25695931477516,
"grad_norm": 1.8106822967529297,
"learning_rate": 2.2768048249248648e-06,
"loss": 0.0449,
"num_input_tokens_seen": 30471952,
"step": 1190
},
{
"epoch": 10.342612419700215,
"grad_norm": 2.150508403778076,
"learning_rate": 2.201541258940129e-06,
"loss": 0.0422,
"num_input_tokens_seen": 30727376,
"step": 1200
},
{
"epoch": 10.428265524625267,
"grad_norm": 2.0471906661987305,
"learning_rate": 2.12718993137555e-06,
"loss": 0.0461,
"num_input_tokens_seen": 30983760,
"step": 1210
},
{
"epoch": 10.513918629550322,
"grad_norm": 2.299278497695923,
"learning_rate": 2.0537750791929296e-06,
"loss": 0.0458,
"num_input_tokens_seen": 31238720,
"step": 1220
},
{
"epoch": 10.599571734475374,
"grad_norm": 2.1924257278442383,
"learning_rate": 1.981320634082873e-06,
"loss": 0.0434,
"num_input_tokens_seen": 31494560,
"step": 1230
},
{
"epoch": 10.685224839400428,
"grad_norm": 2.3524584770202637,
"learning_rate": 1.909850214663575e-06,
"loss": 0.0452,
"num_input_tokens_seen": 31750784,
"step": 1240
},
{
"epoch": 10.770877944325482,
"grad_norm": 2.2468934059143066,
"learning_rate": 1.8393871187816526e-06,
"loss": 0.0447,
"num_input_tokens_seen": 32005120,
"step": 1250
},
{
"epoch": 10.856531049250535,
"grad_norm": 2.448117971420288,
"learning_rate": 1.7699543159175215e-06,
"loss": 0.0449,
"num_input_tokens_seen": 32258480,
"step": 1260
},
{
"epoch": 10.942184154175589,
"grad_norm": 2.0848143100738525,
"learning_rate": 1.7015744396978557e-06,
"loss": 0.0442,
"num_input_tokens_seen": 32510944,
"step": 1270
},
{
"epoch": 11.034261241970022,
"grad_norm": 1.6036432981491089,
"learning_rate": 1.634269780517483e-06,
"loss": 0.0435,
"num_input_tokens_seen": 32780608,
"step": 1280
},
{
"epoch": 11.119914346895074,
"grad_norm": 3.015963315963745,
"learning_rate": 1.568062278273197e-06,
"loss": 0.0286,
"num_input_tokens_seen": 33034112,
"step": 1290
},
{
"epoch": 11.205567451820128,
"grad_norm": 1.6929532289505005,
"learning_rate": 1.5029735152118125e-06,
"loss": 0.0308,
"num_input_tokens_seen": 33290224,
"step": 1300
},
{
"epoch": 11.291220556745182,
"grad_norm": 1.9741885662078857,
"learning_rate": 1.4390247088948073e-06,
"loss": 0.0309,
"num_input_tokens_seen": 33544448,
"step": 1310
},
{
"epoch": 11.376873661670235,
"grad_norm": 1.5955508947372437,
"learning_rate": 1.3762367052818527e-06,
"loss": 0.0275,
"num_input_tokens_seen": 33799536,
"step": 1320
},
{
"epoch": 11.462526766595289,
"grad_norm": 2.293123245239258,
"learning_rate": 1.3146299719354544e-06,
"loss": 0.0304,
"num_input_tokens_seen": 34055952,
"step": 1330
},
{
"epoch": 11.548179871520343,
"grad_norm": 1.8011912107467651,
"learning_rate": 1.254224591348983e-06,
"loss": 0.0299,
"num_input_tokens_seen": 34310000,
"step": 1340
},
{
"epoch": 11.633832976445396,
"grad_norm": 1.8339879512786865,
"learning_rate": 1.1950402544001849e-06,
"loss": 0.0311,
"num_input_tokens_seen": 34565680,
"step": 1350
},
{
"epoch": 11.71948608137045,
"grad_norm": 1.6808807849884033,
"learning_rate": 1.1370962539323837e-06,
"loss": 0.0314,
"num_input_tokens_seen": 34820768,
"step": 1360
},
{
"epoch": 11.805139186295504,
"grad_norm": 1.7647879123687744,
"learning_rate": 1.0804114784654158e-06,
"loss": 0.0311,
"num_input_tokens_seen": 35074016,
"step": 1370
},
{
"epoch": 11.890792291220556,
"grad_norm": 1.753990650177002,
"learning_rate": 1.0250044060383734e-06,
"loss": 0.0299,
"num_input_tokens_seen": 35328272,
"step": 1380
},
{
"epoch": 11.97644539614561,
"grad_norm": 2.10841965675354,
"learning_rate": 9.708930981861603e-07,
"loss": 0.03,
"num_input_tokens_seen": 35582880,
"step": 1390
},
{
"epoch": 12.068522483940043,
"grad_norm": 1.4194451570510864,
"learning_rate": 9.180951940518002e-07,
"loss": 0.026,
"num_input_tokens_seen": 35853280,
"step": 1400
},
{
"epoch": 12.154175588865096,
"grad_norm": 1.612318515777588,
"learning_rate": 8.666279046364595e-07,
"loss": 0.0208,
"num_input_tokens_seen": 36106816,
"step": 1410
},
{
"epoch": 12.23982869379015,
"grad_norm": 1.6022765636444092,
"learning_rate": 8.165080071890208e-07,
"loss": 0.0205,
"num_input_tokens_seen": 36359232,
"step": 1420
},
{
"epoch": 12.325481798715204,
"grad_norm": 1.608430027961731,
"learning_rate": 7.677518397370548e-07,
"loss": 0.0228,
"num_input_tokens_seen": 36614176,
"step": 1430
},
{
"epoch": 12.411134903640257,
"grad_norm": 1.4423803091049194,
"learning_rate": 7.203752957609672e-07,
"loss": 0.0207,
"num_input_tokens_seen": 36868400,
"step": 1440
},
{
"epoch": 12.49678800856531,
"grad_norm": 1.6684809923171997,
"learning_rate": 6.743938190130616e-07,
"loss": 0.0215,
"num_input_tokens_seen": 37121536,
"step": 1450
},
{
"epoch": 12.582441113490365,
"grad_norm": 1.7179003953933716,
"learning_rate": 6.298223984832047e-07,
"loss": 0.0216,
"num_input_tokens_seen": 37377168,
"step": 1460
},
{
"epoch": 12.668094218415417,
"grad_norm": 1.6454778909683228,
"learning_rate": 5.866755635127247e-07,
"loss": 0.0207,
"num_input_tokens_seen": 37632992,
"step": 1470
},
{
"epoch": 12.753747323340471,
"grad_norm": 1.8044767379760742,
"learning_rate": 5.449673790581611e-07,
"loss": 0.0217,
"num_input_tokens_seen": 37888640,
"step": 1480
},
{
"epoch": 12.839400428265524,
"grad_norm": 1.874295711517334,
"learning_rate": 5.04711441106382e-07,
"loss": 0.0197,
"num_input_tokens_seen": 38143760,
"step": 1490
},
{
"epoch": 12.925053533190578,
"grad_norm": 1.3250926733016968,
"learning_rate": 4.659208722425806e-07,
"loss": 0.0207,
"num_input_tokens_seen": 38398560,
"step": 1500
},
{
"epoch": 13.01713062098501,
"grad_norm": 1.2411588430404663,
"learning_rate": 4.2860831737258857e-07,
"loss": 0.0216,
"num_input_tokens_seen": 38670912,
"step": 1510
},
{
"epoch": 13.102783725910065,
"grad_norm": 1.3138427734375,
"learning_rate": 3.9278593960090873e-07,
"loss": 0.0167,
"num_input_tokens_seen": 38925872,
"step": 1520
},
{
"epoch": 13.188436830835117,
"grad_norm": 1.362457036972046,
"learning_rate": 3.5846541626579026e-07,
"loss": 0.0159,
"num_input_tokens_seen": 39183632,
"step": 1530
},
{
"epoch": 13.274089935760172,
"grad_norm": 1.515376091003418,
"learning_rate": 3.256579351326744e-07,
"loss": 0.0156,
"num_input_tokens_seen": 39440864,
"step": 1540
},
{
"epoch": 13.359743040685224,
"grad_norm": 1.4070255756378174,
"learning_rate": 2.94374190747212e-07,
"loss": 0.0166,
"num_input_tokens_seen": 39695712,
"step": 1550
},
{
"epoch": 13.445396145610278,
"grad_norm": 1.4853448867797852,
"learning_rate": 2.64624380949069e-07,
"loss": 0.0173,
"num_input_tokens_seen": 39950304,
"step": 1560
},
{
"epoch": 13.531049250535332,
"grad_norm": 1.542286992073059,
"learning_rate": 2.3641820354764755e-07,
"loss": 0.0165,
"num_input_tokens_seen": 40203616,
"step": 1570
},
{
"epoch": 13.616702355460385,
"grad_norm": 1.565663456916809,
"learning_rate": 2.0976485316080375e-07,
"loss": 0.0167,
"num_input_tokens_seen": 40458464,
"step": 1580
},
{
"epoch": 13.702355460385439,
"grad_norm": 1.3701163530349731,
"learning_rate": 1.846730182175993e-07,
"loss": 0.017,
"num_input_tokens_seen": 40711216,
"step": 1590
},
{
"epoch": 13.788008565310493,
"grad_norm": 1.4886751174926758,
"learning_rate": 1.6115087812605123e-07,
"loss": 0.015,
"num_input_tokens_seen": 40965856,
"step": 1600
},
{
"epoch": 13.873661670235546,
"grad_norm": 1.2140471935272217,
"learning_rate": 1.392061006068246e-07,
"loss": 0.0169,
"num_input_tokens_seen": 41220736,
"step": 1610
},
{
"epoch": 13.9593147751606,
"grad_norm": 1.314063549041748,
"learning_rate": 1.1884583919371251e-07,
"loss": 0.0164,
"num_input_tokens_seen": 41473952,
"step": 1620
},
{
"epoch": 14.051391862955033,
"grad_norm": 1.2103674411773682,
"learning_rate": 1.0007673090173808e-07,
"loss": 0.0168,
"num_input_tokens_seen": 41742832,
"step": 1630
},
{
"epoch": 14.137044967880085,
"grad_norm": 1.250216007232666,
"learning_rate": 8.29048940636279e-08,
"loss": 0.0153,
"num_input_tokens_seen": 41998320,
"step": 1640
},
{
"epoch": 14.222698072805139,
"grad_norm": 1.114964485168457,
"learning_rate": 6.733592633536124e-08,
"loss": 0.0148,
"num_input_tokens_seen": 42253104,
"step": 1650
},
{
"epoch": 14.308351177730193,
"grad_norm": 1.3133609294891357,
"learning_rate": 5.3374902871456965e-08,
"loss": 0.0151,
"num_input_tokens_seen": 42509584,
"step": 1660
},
{
"epoch": 14.394004282655246,
"grad_norm": 1.3046901226043701,
"learning_rate": 4.102637467057746e-08,
"loss": 0.0144,
"num_input_tokens_seen": 42764768,
"step": 1670
},
{
"epoch": 14.4796573875803,
"grad_norm": 1.3270611763000488,
"learning_rate": 3.029436709200084e-08,
"loss": 0.0142,
"num_input_tokens_seen": 43019376,
"step": 1680
},
{
"epoch": 14.565310492505354,
"grad_norm": 1.1487038135528564,
"learning_rate": 2.1182378543438408e-08,
"loss": 0.0159,
"num_input_tokens_seen": 43273248,
"step": 1690
},
{
"epoch": 14.650963597430406,
"grad_norm": 1.1392930746078491,
"learning_rate": 1.3693379340626867e-08,
"loss": 0.0148,
"num_input_tokens_seen": 43529200,
"step": 1700
},
{
"epoch": 14.73661670235546,
"grad_norm": 1.24246084690094,
"learning_rate": 7.829810739069521e-09,
"loss": 0.0144,
"num_input_tokens_seen": 43781760,
"step": 1710
},
{
"epoch": 14.822269807280513,
"grad_norm": 1.2764571905136108,
"learning_rate": 3.593584138237294e-09,
"loss": 0.0142,
"num_input_tokens_seen": 44036144,
"step": 1720
},
{
"epoch": 14.907922912205567,
"grad_norm": 1.4254299402236938,
"learning_rate": 9.860804584937988e-10,
"loss": 0.0144,
"num_input_tokens_seen": 44292256,
"step": 1730
},
{
"epoch": 14.993576017130621,
"grad_norm": 1.1011109352111816,
"learning_rate": 8.149690943204391e-12,
"loss": 0.014,
"num_input_tokens_seen": 44548112,
"step": 1740
}
],
"logging_steps": 10,
"max_steps": 1740,
"num_input_tokens_seen": 44548112,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.9454424851110953e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}