{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 10.0,
  "global_step": 1374,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0004999346542461473,
      "loss": 0.4698,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0004997386511451298,
      "loss": 0.2312,
      "step": 20
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0004994120931607106,
      "loss": 0.1855,
      "step": 30
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0004989551510063109,
      "loss": 0.1778,
      "step": 40
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0004983680635557671,
      "loss": 0.1728,
      "step": 50
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0004976511377184557,
      "loss": 0.182,
      "step": 60
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0004968047482788508,
      "loss": 0.1752,
      "step": 70
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0004958293377006004,
      "loss": 0.1742,
      "step": 80
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0004947254158952209,
      "loss": 0.1686,
      "step": 90
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0004934935599555328,
      "loss": 0.1674,
      "step": 100
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0004921344138539761,
      "loss": 0.1665,
      "step": 110
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.000490648688105964,
      "loss": 0.1673,
      "step": 120
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0004890371593984484,
      "loss": 0.1706,
      "step": 130
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0004873006701838952,
      "loss": 0.1674,
      "step": 140
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0004854401282398789,
      "loss": 0.1662,
      "step": 150
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0004834565061945266,
      "loss": 0.1615,
      "step": 160
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0004813508410180617,
      "loss": 0.1643,
      "step": 170
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00047912423348071056,
      "loss": 0.1634,
      "step": 180
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00047677784757725784,
      "loss": 0.1552,
      "step": 190
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00047431290991854904,
      "loss": 0.1605,
      "step": 200
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0004717307090902601,
      "loss": 0.1523,
      "step": 210
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00046903259497926896,
      "loss": 0.1603,
      "step": 220
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00046621997806798004,
      "loss": 0.1599,
      "step": 230
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00046329432869697236,
      "loss": 0.1624,
      "step": 240
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0004602571762963551,
      "loss": 0.1669,
      "step": 250
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0004571101085862337,
      "loss": 0.1579,
      "step": 260
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0004538547707467038,
      "loss": 0.1665,
      "step": 270
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0004504928645578067,
      "loss": 0.1632,
      "step": 280
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0004470261475098967,
      "loss": 0.1571,
      "step": 290
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00044345643188488506,
      "loss": 0.154,
      "step": 300
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00043978558380884013,
      "loss": 0.1504,
      "step": 310
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00043601552227644035,
      "loss": 0.1634,
      "step": 320
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00043214821814778904,
      "loss": 0.147,
      "step": 330
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0004281856931181155,
      "loss": 0.1493,
      "step": 340
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0004241300186609015,
      "loss": 0.1543,
      "step": 350
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0004199833149449853,
      "loss": 0.1519,
      "step": 360
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00041574774972620956,
      "loss": 0.152,
      "step": 370
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00041142553721419184,
      "loss": 0.155,
      "step": 380
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0004070189369148116,
      "loss": 0.1585,
      "step": 390
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0004025302524490166,
      "loss": 0.1518,
      "step": 400
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0003979618303485687,
      "loss": 0.1536,
      "step": 410
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0003933160588293564,
      "loss": 0.1477,
      "step": 420
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00038859536654291705,
      "loss": 0.1494,
      "step": 430
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00038380222130681986,
      "loss": 0.1578,
      "step": 440
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00037893912881457507,
      "loss": 0.1533,
      "step": 450
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00037400863132574223,
      "loss": 0.1511,
      "step": 460
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0003690133063369238,
      "loss": 0.1548,
      "step": 470
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00036395576523433673,
      "loss": 0.1459,
      "step": 480
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0003588386519286688,
      "loss": 0.1518,
      "step": 490
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00035366464147293223,
      "loss": 0.1529,
      "step": 500
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0003484364386640365,
      "loss": 0.1608,
      "step": 510
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00034315677662881205,
      "loss": 0.1616,
      "step": 520
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0003378284153952254,
      "loss": 0.1549,
      "step": 530
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00033245414044952927,
      "loss": 0.155,
      "step": 540
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00032703676128010553,
      "loss": 0.1514,
      "step": 550
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.0003215791099087601,
      "loss": 0.1462,
      "step": 560
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0003160840394102379,
      "loss": 0.1528,
      "step": 570
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0003105544224207326,
      "loss": 0.1517,
      "step": 580
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00030499314963616987,
      "loss": 0.1532,
      "step": 590
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002994031283010493,
      "loss": 0.155,
      "step": 600
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00029378728068863683,
      "loss": 0.1499,
      "step": 610
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00028814854257329825,
      "loss": 0.1591,
      "step": 620
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00028248986169577693,
      "loss": 0.1501,
      "step": 630
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00027681419622221514,
      "loss": 0.1449,
      "step": 640
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00027112451319772447,
      "loss": 0.1423,
      "step": 650
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00026542378699531645,
      "loss": 0.1462,
      "step": 660
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00025971499776100064,
      "loss": 0.1469,
      "step": 670
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00025400112985586587,
      "loss": 0.1418,
      "step": 680
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00024828517029595767,
      "loss": 0.1397,
      "step": 690
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00024257010719076723,
      "loss": 0.142,
      "step": 700
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00023685892818115019,
      "loss": 0.1395,
      "step": 710
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00023115461887748858,
      "loss": 0.1392,
      "step": 720
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00022546016129891568,
      "loss": 0.1458,
      "step": 730
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0002197785323144176,
      "loss": 0.1341,
      "step": 740
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00021411270208662698,
      "loss": 0.1475,
      "step": 750
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00020846563251912355,
      "loss": 0.149,
      "step": 760
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00020284027570805074,
      "loss": 0.1399,
      "step": 770
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00019723957239886066,
      "loss": 0.1455,
      "step": 780
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00019166645044899205,
      "loss": 0.147,
      "step": 790
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00018612382329728588,
      "loss": 0.1499,
      "step": 800
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00018061458844093858,
      "loss": 0.1353,
      "step": 810
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.0001751416259207889,
      "loss": 0.1433,
      "step": 820
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00016970779681573046,
      "loss": 0.1487,
      "step": 830
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00016431594174703645,
      "loss": 0.1459,
      "step": 840
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.00015896887939338,
      "loss": 0.153,
      "step": 850
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0001536694050173242,
      "loss": 0.1411,
      "step": 860
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00014842028900405421,
      "loss": 0.1426,
      "step": 870
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00014322427541311347,
      "loss": 0.1455,
      "step": 880
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.0001380840805439031,
      "loss": 0.1485,
      "step": 890
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00013300239151569252,
      "loss": 0.152,
      "step": 900
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00012798186486288483,
      "loss": 0.145,
      "step": 910
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.0001230251251462708,
      "loss": 0.1311,
      "step": 920
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00011813476358099823,
      "loss": 0.1411,
      "step": 930
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0001133133366819717,
      "loss": 0.1384,
      "step": 940
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00010856336492739407,
      "loss": 0.1455,
      "step": 950
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00010388733144114604,
      "loss": 0.1307,
      "step": 960
    },
    {
      "epoch": 1.41,
      "learning_rate": 9.928768069469313e-05,
      "loss": 0.1404,
      "step": 970
    },
    {
      "epoch": 1.43,
      "learning_rate": 9.476681722919939e-05,
      "loss": 0.1333,
      "step": 980
    },
    {
      "epoch": 1.44,
      "learning_rate": 9.032710439851444e-05,
      "loss": 0.144,
      "step": 990
    },
    {
      "epoch": 1.46,
      "learning_rate": 8.597086313369271e-05,
      "loss": 0.1499,
      "step": 1000
    },
    {
      "epoch": 1.47,
      "learning_rate": 8.170037072968967e-05,
      "loss": 0.1471,
      "step": 1010
    },
    {
      "epoch": 1.48,
      "learning_rate": 7.751785965486893e-05,
      "loss": 0.1312,
      "step": 1020
    },
    {
      "epoch": 1.5,
      "learning_rate": 7.342551638394385e-05,
      "loss": 0.1466,
      "step": 1030
    },
    {
      "epoch": 1.51,
      "learning_rate": 6.942548025496312e-05,
      "loss": 0.147,
      "step": 1040
    },
    {
      "epoch": 1.53,
      "learning_rate": 6.551984235093692e-05,
      "loss": 0.1375,
      "step": 1050
    },
    {
      "epoch": 1.54,
      "learning_rate": 6.171064440669014e-05,
      "loss": 0.1398,
      "step": 1060
    },
    {
      "epoch": 1.56,
      "learning_rate": 5.7999877741512744e-05,
      "loss": 0.1337,
      "step": 1070
    },
    {
      "epoch": 1.57,
      "learning_rate": 5.438948221816559e-05,
      "loss": 0.1351,
      "step": 1080
    },
    {
      "epoch": 1.59,
      "learning_rate": 5.088134522878601e-05,
      "loss": 0.1417,
      "step": 1090
    },
    {
      "epoch": 1.6,
      "learning_rate": 4.74773007082237e-05,
      "loss": 0.14,
      "step": 1100
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.417912817532133e-05,
      "loss": 0.1423,
      "step": 1110
    },
    {
      "epoch": 1.63,
      "learning_rate": 4.098855180264285e-05,
      "loss": 0.1327,
      "step": 1120
    },
    {
      "epoch": 1.64,
      "learning_rate": 3.7907239515134697e-05,
      "loss": 0.1405,
      "step": 1130
    },
    {
      "epoch": 1.66,
      "learning_rate": 3.493680211819103e-05,
      "loss": 0.1444,
      "step": 1140
    },
    {
      "epoch": 1.67,
      "learning_rate": 3.207879245557976e-05,
      "loss": 0.1421,
      "step": 1150
    },
    {
      "epoch": 1.69,
      "learning_rate": 2.933470459766821e-05,
      "loss": 0.1389,
      "step": 1160
    },
    {
      "epoch": 1.7,
      "learning_rate": 2.6705973060374117e-05,
      "loss": 0.1457,
      "step": 1170
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.4193972055249343e-05,
      "loss": 0.1451,
      "step": 1180
    },
    {
      "epoch": 1.73,
      "learning_rate": 2.180001477108867e-05,
      "loss": 0.1343,
      "step": 1190
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.9525352687439546e-05,
      "loss": 0.1267,
      "step": 1200
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.737117492037091e-05,
      "loss": 0.1364,
      "step": 1210
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.533860760084374e-05,
      "loss": 0.1511,
      "step": 1220
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.3428713286008004e-05,
      "loss": 0.1418,
      "step": 1230
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.1642490403733995e-05,
      "loss": 0.1403,
      "step": 1240
    },
    {
      "epoch": 1.82,
      "learning_rate": 9.980872730667972e-06,
      "loss": 0.1433,
      "step": 1250
    },
    {
      "epoch": 1.83,
      "learning_rate": 8.444728904085736e-06,
      "loss": 0.1491,
      "step": 1260
    },
    {
      "epoch": 1.85,
      "learning_rate": 7.0348619677982066e-06,
      "loss": 0.1332,
      "step": 1270
    },
    {
      "epoch": 1.86,
      "learning_rate": 5.752008952347648e-06,
      "loss": 0.1351,
      "step": 1280
    },
    {
      "epoch": 1.88,
      "learning_rate": 4.596840489713095e-06,
      "loss": 0.1471,
      "step": 1290
    },
    {
      "epoch": 1.89,
      "learning_rate": 3.5699604627266734e-06,
      "loss": 0.1572,
      "step": 1300
    },
    {
      "epoch": 1.91,
      "learning_rate": 2.6719056893842463e-06,
      "loss": 0.1436,
      "step": 1310
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.9031456422151373e-06,
      "loss": 0.1482,
      "step": 1320
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.2640822028578414e-06,
      "loss": 0.1386,
      "step": 1330
    },
    {
      "epoch": 1.95,
      "learning_rate": 7.550494519699968e-07,
      "loss": 0.1294,
      "step": 1340
    },
    {
      "epoch": 1.97,
      "learning_rate": 3.7631349458230877e-07,
      "loss": 0.1439,
      "step": 1350
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.280723209880208e-07,
      "loss": 0.135,
      "step": 1360
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.0455703240069836e-08,
      "loss": 0.1345,
      "step": 1370
    },
    {
      "epoch": 2.0,
      "step": 1374,
      "total_flos": 3.2608758262451405e+18,
      "train_loss": 0.1525881653095784,
      "train_runtime": 23876.5124,
      "train_samples_per_second": 3.681,
      "train_steps_per_second": 0.058
    }
  ],
  "logging_steps": 10,
  "max_steps": 1374,
  "num_train_epochs": 2,
  "save_steps": 150,
  "total_flos": 3.2608758262451405e+18,
  "trial_name": null,
  "trial_params": null
}