{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9997613053189132,
  "eval_steps": 500,
  "global_step": 10996,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.01, "learning_rate": 0.0004977264459803565, "loss": 5.874, "step": 50 },
    { "epoch": 0.02, "learning_rate": 0.0004954528919607129, "loss": 4.0765, "step": 100 },
    { "epoch": 0.03, "learning_rate": 0.0004931793379410695, "loss": 3.7752, "step": 150 },
    { "epoch": 0.04, "learning_rate": 0.000490905783921426, "loss": 3.6357, "step": 200 },
    { "epoch": 0.05, "learning_rate": 0.0004886322299017824, "loss": 3.5374, "step": 250 },
    { "epoch": 0.05, "learning_rate": 0.00048635867588213893, "loss": 3.4875, "step": 300 },
    { "epoch": 0.06, "learning_rate": 0.0004840851218624955, "loss": 3.411, "step": 350 },
    { "epoch": 0.07, "learning_rate": 0.00048181156784285196, "loss": 3.3791, "step": 400 },
    { "epoch": 0.08, "learning_rate": 0.0004795380138232084, "loss": 3.3377, "step": 450 },
    { "epoch": 0.09, "learning_rate": 0.00047726445980356494, "loss": 3.292, "step": 500 },
    { "epoch": 0.1, "learning_rate": 0.00047499090578392145, "loss": 3.2765, "step": 550 },
    { "epoch": 0.11, "learning_rate": 0.0004727173517642779, "loss": 3.2584, "step": 600 },
    { "epoch": 0.12, "learning_rate": 0.0004704437977446344, "loss": 3.2319, "step": 650 },
    { "epoch": 0.13, "learning_rate": 0.0004681702437249909, "loss": 3.2021, "step": 700 },
    { "epoch": 0.14, "learning_rate": 0.00046589668970534745, "loss": 3.1816, "step": 750 },
    { "epoch": 0.15, "learning_rate": 0.0004636231356857039, "loss": 3.165, "step": 800 },
    { "epoch": 0.15, "learning_rate": 0.0004613495816660604, "loss": 3.1436, "step": 850 },
    { "epoch": 0.16, "learning_rate": 0.0004590760276464169, "loss": 3.1491, "step": 900 },
    { "epoch": 0.17, "learning_rate": 0.0004568024736267734, "loss": 3.1343, "step": 950 },
    { "epoch": 0.18, "learning_rate": 0.00045452891960712986, "loss": 3.1208, "step": 1000 },
    { "epoch": 0.19, "learning_rate": 0.0004522553655874864, "loss": 3.0954, "step": 1050 },
    { "epoch": 0.2, "learning_rate": 0.00044998181156784284, "loss": 3.0963, "step": 1100 },
    { "epoch": 0.21, "learning_rate": 0.00044770825754819935, "loss": 3.098, "step": 1150 },
    { "epoch": 0.22, "learning_rate": 0.00044543470352855586, "loss": 3.0695, "step": 1200 },
    { "epoch": 0.23, "learning_rate": 0.0004431611495089123, "loss": 3.0785, "step": 1250 },
    { "epoch": 0.24, "learning_rate": 0.00044088759548926884, "loss": 3.0524, "step": 1300 },
    { "epoch": 0.25, "learning_rate": 0.0004386140414696253, "loss": 3.0275, "step": 1350 },
    { "epoch": 0.25, "learning_rate": 0.0004363404874499818, "loss": 3.0368, "step": 1400 },
    { "epoch": 0.26, "learning_rate": 0.00043406693343033833, "loss": 3.0177, "step": 1450 },
    { "epoch": 0.27, "learning_rate": 0.0004317933794106948, "loss": 3.0117, "step": 1500 },
    { "epoch": 0.28, "learning_rate": 0.0004295198253910513, "loss": 3.0023, "step": 1550 },
    { "epoch": 0.29, "learning_rate": 0.0004272462713714078, "loss": 2.9814, "step": 1600 },
    { "epoch": 0.3, "learning_rate": 0.0004249727173517643, "loss": 2.9867, "step": 1650 },
    { "epoch": 0.31, "learning_rate": 0.0004226991633321208, "loss": 2.991, "step": 1700 },
    { "epoch": 0.32, "learning_rate": 0.00042042560931247725, "loss": 2.9764, "step": 1750 },
    { "epoch": 0.33, "learning_rate": 0.00041815205529283376, "loss": 2.9587, "step": 1800 },
    { "epoch": 0.34, "learning_rate": 0.0004158785012731903, "loss": 2.9651, "step": 1850 },
    { "epoch": 0.35, "learning_rate": 0.00041360494725354674, "loss": 2.9606, "step": 1900 },
    { "epoch": 0.35, "learning_rate": 0.00041133139323390325, "loss": 2.9448, "step": 1950 },
    { "epoch": 0.36, "learning_rate": 0.00040905783921425977, "loss": 2.9585, "step": 2000 },
    { "epoch": 0.37, "learning_rate": 0.00040678428519461623, "loss": 2.9607, "step": 2050 },
    { "epoch": 0.38, "learning_rate": 0.00040451073117497274, "loss": 2.9199, "step": 2100 },
    { "epoch": 0.39, "learning_rate": 0.0004022371771553292, "loss": 2.9303, "step": 2150 },
    { "epoch": 0.4, "learning_rate": 0.0003999636231356857, "loss": 2.9353, "step": 2200 },
    { "epoch": 0.41, "learning_rate": 0.00039769006911604223, "loss": 2.9123, "step": 2250 },
    { "epoch": 0.42, "learning_rate": 0.0003954165150963987, "loss": 2.9161, "step": 2300 },
    { "epoch": 0.43, "learning_rate": 0.0003931429610767552, "loss": 2.9146, "step": 2350 },
    { "epoch": 0.44, "learning_rate": 0.00039086940705711167, "loss": 2.9086, "step": 2400 },
    { "epoch": 0.45, "learning_rate": 0.0003885958530374682, "loss": 2.9094, "step": 2450 },
    { "epoch": 0.45, "learning_rate": 0.0003863222990178247, "loss": 2.8968, "step": 2500 },
    { "epoch": 0.46, "learning_rate": 0.00038404874499818115, "loss": 2.9008, "step": 2550 },
    { "epoch": 0.47, "learning_rate": 0.0003817751909785376, "loss": 2.8836, "step": 2600 },
    { "epoch": 0.48, "learning_rate": 0.0003795016369588942, "loss": 2.8773, "step": 2650 },
    { "epoch": 0.49, "learning_rate": 0.00037722808293925064, "loss": 2.8648, "step": 2700 },
    { "epoch": 0.5, "learning_rate": 0.00037495452891960716, "loss": 2.8699, "step": 2750 },
    { "epoch": 0.51, "learning_rate": 0.0003726809748999636, "loss": 2.8693, "step": 2800 },
    { "epoch": 0.52, "learning_rate": 0.00037040742088032013, "loss": 2.8619, "step": 2850 },
    { "epoch": 0.53, "learning_rate": 0.00036813386686067665, "loss": 2.8591, "step": 2900 },
    { "epoch": 0.54, "learning_rate": 0.0003658603128410331, "loss": 2.8614, "step": 2950 },
    { "epoch": 0.55, "learning_rate": 0.00036358675882138957, "loss": 2.8605, "step": 3000 },
    { "epoch": 0.55, "learning_rate": 0.00036131320480174613, "loss": 2.8357, "step": 3050 },
    { "epoch": 0.56, "learning_rate": 0.0003590396507821026, "loss": 2.847, "step": 3100 },
    { "epoch": 0.57, "learning_rate": 0.00035676609676245905, "loss": 2.8503, "step": 3150 },
    { "epoch": 0.58, "learning_rate": 0.00035449254274281557, "loss": 2.8334, "step": 3200 },
    { "epoch": 0.59, "learning_rate": 0.0003522189887231721, "loss": 2.8424, "step": 3250 },
    { "epoch": 0.6, "learning_rate": 0.0003499454347035286, "loss": 2.8295, "step": 3300 },
    { "epoch": 0.61, "learning_rate": 0.00034767188068388506, "loss": 2.8359, "step": 3350 },
    { "epoch": 0.62, "learning_rate": 0.0003453983266642415, "loss": 2.8252, "step": 3400 },
    { "epoch": 0.63, "learning_rate": 0.00034312477264459803, "loss": 2.82, "step": 3450 },
    { "epoch": 0.64, "learning_rate": 0.00034085121862495455, "loss": 2.8134, "step": 3500 },
    { "epoch": 0.65, "learning_rate": 0.000338577664605311, "loss": 2.8168, "step": 3550 },
    { "epoch": 0.65, "learning_rate": 0.0003363041105856675, "loss": 2.8248, "step": 3600 },
    { "epoch": 0.66, "learning_rate": 0.000334030556566024, "loss": 2.8096, "step": 3650 },
    { "epoch": 0.67, "learning_rate": 0.00033175700254638055, "loss": 2.8154, "step": 3700 },
    { "epoch": 0.68, "learning_rate": 0.000329483448526737, "loss": 2.8015, "step": 3750 },
    { "epoch": 0.69, "learning_rate": 0.00032720989450709347, "loss": 2.8024, "step": 3800 },
    { "epoch": 0.7, "learning_rate": 0.00032493634048745, "loss": 2.7998, "step": 3850 },
    { "epoch": 0.71, "learning_rate": 0.0003226627864678065, "loss": 2.8011, "step": 3900 },
    { "epoch": 0.72, "learning_rate": 0.00032038923244816296, "loss": 2.7963, "step": 3950 },
    { "epoch": 0.73, "learning_rate": 0.00031811567842851947, "loss": 2.7863, "step": 4000 },
    { "epoch": 0.74, "learning_rate": 0.00031584212440887593, "loss": 2.7973, "step": 4050 },
    { "epoch": 0.75, "learning_rate": 0.0003135685703892325, "loss": 2.788, "step": 4100 },
    { "epoch": 0.75, "learning_rate": 0.00031129501636958896, "loss": 2.8026, "step": 4150 },
    { "epoch": 0.76, "learning_rate": 0.0003090214623499454, "loss": 2.7902, "step": 4200 },
    { "epoch": 0.77, "learning_rate": 0.00030674790833030193, "loss": 2.7684, "step": 4250 },
    { "epoch": 0.78, "learning_rate": 0.00030447435431065845, "loss": 2.7846, "step": 4300 },
    { "epoch": 0.79, "learning_rate": 0.0003022008002910149, "loss": 2.7703, "step": 4350 },
    { "epoch": 0.8, "learning_rate": 0.0002999272462713714, "loss": 2.7746, "step": 4400 },
    { "epoch": 0.81, "learning_rate": 0.0002976536922517279, "loss": 2.7674, "step": 4450 },
    { "epoch": 0.82, "learning_rate": 0.00029538013823208445, "loss": 2.7686, "step": 4500 },
    { "epoch": 0.83, "learning_rate": 0.0002931065842124409, "loss": 2.7773, "step": 4550 },
    { "epoch": 0.84, "learning_rate": 0.00029083303019279737, "loss": 2.7475, "step": 4600 },
    { "epoch": 0.85, "learning_rate": 0.0002885594761731539, "loss": 2.7616, "step": 4650 },
    { "epoch": 0.85, "learning_rate": 0.00028628592215351035, "loss": 2.756, "step": 4700 },
    { "epoch": 0.86, "learning_rate": 0.00028401236813386686, "loss": 2.7548, "step": 4750 },
    { "epoch": 0.87, "learning_rate": 0.0002817388141142234, "loss": 2.7499, "step": 4800 },
    { "epoch": 0.88, "learning_rate": 0.00027946526009457983, "loss": 2.7637, "step": 4850 },
    { "epoch": 0.89, "learning_rate": 0.00027719170607493635, "loss": 2.7349, "step": 4900 },
    { "epoch": 0.9, "learning_rate": 0.00027491815205529286, "loss": 2.7346, "step": 4950 },
    { "epoch": 0.91, "learning_rate": 0.0002726445980356493, "loss": 2.7384, "step": 5000 },
    { "epoch": 0.92, "learning_rate": 0.00027037104401600584, "loss": 2.724, "step": 5050 },
    { "epoch": 0.93, "learning_rate": 0.0002680974899963623, "loss": 2.7508, "step": 5100 },
    { "epoch": 0.94, "learning_rate": 0.0002658239359767188, "loss": 2.7523, "step": 5150 },
    { "epoch": 0.95, "learning_rate": 0.0002635503819570753, "loss": 2.7298, "step": 5200 },
    { "epoch": 0.95, "learning_rate": 0.0002612768279374318, "loss": 2.7281, "step": 5250 },
    { "epoch": 0.96, "learning_rate": 0.0002590032739177883, "loss": 2.7215, "step": 5300 },
    { "epoch": 0.97, "learning_rate": 0.0002567297198981448, "loss": 2.7318, "step": 5350 },
    { "epoch": 0.98, "learning_rate": 0.0002544561658785013, "loss": 2.7282, "step": 5400 },
    { "epoch": 0.99, "learning_rate": 0.0002521826118588578, "loss": 2.7313, "step": 5450 },
    { "epoch": 1.0, "learning_rate": 0.0002499090578392143, "loss": 2.7133, "step": 5500 },
    { "epoch": 1.01, "learning_rate": 0.00024763550381957076, "loss": 2.6344, "step": 5550 },
    { "epoch": 1.02, "learning_rate": 0.0002453619497999273, "loss": 2.6231, "step": 5600 },
    { "epoch": 1.03, "learning_rate": 0.00024308839578028374, "loss": 2.609, "step": 5650 },
    { "epoch": 1.04, "learning_rate": 0.00024081484176064022, "loss": 2.6154, "step": 5700 },
    { "epoch": 1.05, "learning_rate": 0.00023854128774099674, "loss": 2.6371, "step": 5750 },
    { "epoch": 1.05, "learning_rate": 0.0002362677337213532, "loss": 2.6082, "step": 5800 },
    { "epoch": 1.06, "learning_rate": 0.0002339941797017097, "loss": 2.6218, "step": 5850 },
    { "epoch": 1.07, "learning_rate": 0.0002317206256820662, "loss": 2.6284, "step": 5900 },
    { "epoch": 1.08, "learning_rate": 0.00022944707166242271, "loss": 2.6191, "step": 5950 },
    { "epoch": 1.09, "learning_rate": 0.00022717351764277917, "loss": 2.6252, "step": 6000 },
    { "epoch": 1.1, "learning_rate": 0.0002248999636231357, "loss": 2.6266, "step": 6050 },
    { "epoch": 1.11, "learning_rate": 0.00022262640960349218, "loss": 2.6426, "step": 6100 },
    { "epoch": 1.12, "learning_rate": 0.0002203528555838487, "loss": 2.6072, "step": 6150 },
    { "epoch": 1.13, "learning_rate": 0.00021807930156420515, "loss": 2.614, "step": 6200 },
    { "epoch": 1.14, "learning_rate": 0.00021580574754456166, "loss": 2.5959, "step": 6250 },
    { "epoch": 1.15, "learning_rate": 0.00021353219352491815, "loss": 2.6065, "step": 6300 },
    { "epoch": 1.15, "learning_rate": 0.00021125863950527467, "loss": 2.6163, "step": 6350 },
    { "epoch": 1.16, "learning_rate": 0.00020898508548563113, "loss": 2.6083, "step": 6400 },
    { "epoch": 1.17, "learning_rate": 0.00020671153146598764, "loss": 2.6119, "step": 6450 },
    { "epoch": 1.18, "learning_rate": 0.00020443797744634413, "loss": 2.6023, "step": 6500 },
    { "epoch": 1.19, "learning_rate": 0.00020216442342670064, "loss": 2.6086, "step": 6550 },
    { "epoch": 1.2, "learning_rate": 0.0001998908694070571, "loss": 2.5988, "step": 6600 },
    { "epoch": 1.21, "learning_rate": 0.00019761731538741362, "loss": 2.6046, "step": 6650 },
    { "epoch": 1.22, "learning_rate": 0.0001953437613677701, "loss": 2.5929, "step": 6700 },
    { "epoch": 1.23, "learning_rate": 0.00019307020734812662, "loss": 2.5935, "step": 6750 },
    { "epoch": 1.24, "learning_rate": 0.00019079665332848308, "loss": 2.5875, "step": 6800 },
    { "epoch": 1.25, "learning_rate": 0.00018852309930883956, "loss": 2.6001, "step": 6850 },
    { "epoch": 1.25, "learning_rate": 0.00018624954528919608, "loss": 2.6021, "step": 6900 },
    { "epoch": 1.26, "learning_rate": 0.00018397599126955257, "loss": 2.5887, "step": 6950 },
    { "epoch": 1.27, "learning_rate": 0.00018170243724990905, "loss": 2.6022, "step": 7000 },
    { "epoch": 1.28, "learning_rate": 0.00017942888323026554, "loss": 2.5965, "step": 7050 },
    { "epoch": 1.29, "learning_rate": 0.00017715532921062205, "loss": 2.5879, "step": 7100 },
    { "epoch": 1.3, "learning_rate": 0.00017488177519097854, "loss": 2.5935, "step": 7150 },
    { "epoch": 1.31, "learning_rate": 0.00017260822117133503, "loss": 2.6041, "step": 7200 },
    { "epoch": 1.32, "learning_rate": 0.00017033466715169152, "loss": 2.6041, "step": 7250 },
    { "epoch": 1.33, "learning_rate": 0.00016806111313204803, "loss": 2.6024, "step": 7300 },
    { "epoch": 1.34, "learning_rate": 0.00016578755911240452, "loss": 2.5995, "step": 7350 },
    { "epoch": 1.35, "learning_rate": 0.000163514005092761, "loss": 2.5959, "step": 7400 },
    { "epoch": 1.35, "learning_rate": 0.0001612404510731175, "loss": 2.579, "step": 7450 },
    { "epoch": 1.36, "learning_rate": 0.000158966897053474, "loss": 2.5807, "step": 7500 },
    { "epoch": 1.37, "learning_rate": 0.0001566933430338305, "loss": 2.5922, "step": 7550 },
    { "epoch": 1.38, "learning_rate": 0.00015441978901418698, "loss": 2.5729, "step": 7600 },
    { "epoch": 1.39, "learning_rate": 0.00015214623499454347, "loss": 2.5677, "step": 7650 },
    { "epoch": 1.4, "learning_rate": 0.00014987268097489998, "loss": 2.5841, "step": 7700 },
    { "epoch": 1.41, "learning_rate": 0.00014759912695525647, "loss": 2.5908, "step": 7750 },
    { "epoch": 1.42, "learning_rate": 0.00014532557293561296, "loss": 2.5857, "step": 7800 },
    { "epoch": 1.43, "learning_rate": 0.00014305201891596944, "loss": 2.588, "step": 7850 },
    { "epoch": 1.44, "learning_rate": 0.00014077846489632596, "loss": 2.5843, "step": 7900 },
    { "epoch": 1.45, "learning_rate": 0.00013850491087668244, "loss": 2.5802, "step": 7950 },
    { "epoch": 1.45, "learning_rate": 0.0001362313568570389, "loss": 2.5631, "step": 8000 },
    { "epoch": 1.46, "learning_rate": 0.00013395780283739542, "loss": 2.5767, "step": 8050 },
    { "epoch": 1.47, "learning_rate": 0.0001316842488177519, "loss": 2.563, "step": 8100 },
    { "epoch": 1.48, "learning_rate": 0.00012941069479810842, "loss": 2.5844, "step": 8150 },
    { "epoch": 1.49, "learning_rate": 0.00012713714077846488, "loss": 2.5721, "step": 8200 },
    { "epoch": 1.5, "learning_rate": 0.0001248635867588214, "loss": 2.5871, "step": 8250 },
    { "epoch": 1.51, "learning_rate": 0.00012259003273917788, "loss": 2.5781, "step": 8300 },
    { "epoch": 1.52, "learning_rate": 0.00012031647871953438, "loss": 2.5646, "step": 8350 },
    { "epoch": 1.53, "learning_rate": 0.00011804292469989088, "loss": 2.5614, "step": 8400 },
    { "epoch": 1.54, "learning_rate": 0.00011576937068024737, "loss": 2.568, "step": 8450 },
    { "epoch": 1.55, "learning_rate": 0.00011349581666060387, "loss": 2.5592, "step": 8500 },
    { "epoch": 1.55, "learning_rate": 0.00011122226264096034, "loss": 2.5672, "step": 8550 },
    { "epoch": 1.56, "learning_rate": 0.00010894870862131685, "loss": 2.5649, "step": 8600 },
    { "epoch": 1.57, "learning_rate": 0.00010667515460167333, "loss": 2.5686, "step": 8650 },
    { "epoch": 1.58, "learning_rate": 0.00010440160058202983, "loss": 2.5758, "step": 8700 },
    { "epoch": 1.59, "learning_rate": 0.00010212804656238632, "loss": 2.565, "step": 8750 },
    { "epoch": 1.6, "learning_rate": 9.985449254274281e-05, "loss": 2.5904, "step": 8800 },
    { "epoch": 1.61, "learning_rate": 9.758093852309931e-05, "loss": 2.5668, "step": 8850 },
    { "epoch": 1.62, "learning_rate": 9.53073845034558e-05, "loss": 2.5714, "step": 8900 },
    { "epoch": 1.63, "learning_rate": 9.30338304838123e-05, "loss": 2.5511, "step": 8950 },
    { "epoch": 1.64, "learning_rate": 9.076027646416878e-05, "loss": 2.5756, "step": 9000 },
    { "epoch": 1.65, "learning_rate": 8.848672244452528e-05, "loss": 2.5688, "step": 9050 },
    { "epoch": 1.65, "learning_rate": 8.621316842488177e-05, "loss": 2.5567, "step": 9100 },
    { "epoch": 1.66, "learning_rate": 8.393961440523827e-05, "loss": 2.561, "step": 9150 },
    { "epoch": 1.67, "learning_rate": 8.166606038559476e-05, "loss": 2.5525, "step": 9200 },
    { "epoch": 1.68, "learning_rate": 7.939250636595126e-05, "loss": 2.5665, "step": 9250 },
    { "epoch": 1.69, "learning_rate": 7.711895234630775e-05, "loss": 2.5686, "step": 9300 },
    { "epoch": 1.7, "learning_rate": 7.484539832666425e-05, "loss": 2.5627, "step": 9350 },
    { "epoch": 1.71, "learning_rate": 7.257184430702073e-05, "loss": 2.5454, "step": 9400 },
    { "epoch": 1.72, "learning_rate": 7.029829028737724e-05, "loss": 2.5677, "step": 9450 },
    { "epoch": 1.73, "learning_rate": 6.802473626773372e-05, "loss": 2.572, "step": 9500 },
    { "epoch": 1.74, "learning_rate": 6.575118224809022e-05, "loss": 2.5468, "step": 9550 },
    { "epoch": 1.75, "learning_rate": 6.347762822844671e-05, "loss": 2.5627, "step": 9600 },
    { "epoch": 1.75, "learning_rate": 6.12040742088032e-05, "loss": 2.5523, "step": 9650 },
    { "epoch": 1.76, "learning_rate": 5.89305201891597e-05, "loss": 2.559, "step": 9700 },
    { "epoch": 1.77, "learning_rate": 5.665696616951619e-05, "loss": 2.5542, "step": 9750 },
    { "epoch": 1.78, "learning_rate": 5.4383412149872686e-05, "loss": 2.5632, "step": 9800 },
    { "epoch": 1.79, "learning_rate": 5.210985813022918e-05, "loss": 2.5646, "step": 9850 },
    { "epoch": 1.8, "learning_rate": 4.9836304110585674e-05, "loss": 2.545, "step": 9900 },
    { "epoch": 1.81, "learning_rate": 4.756275009094216e-05, "loss": 2.5525, "step": 9950 },
    { "epoch": 1.82, "learning_rate": 4.5289196071298655e-05, "loss": 2.5823, "step": 10000 },
    { "epoch": 1.83, "learning_rate": 4.301564205165515e-05, "loss": 2.5547, "step": 10050 },
    { "epoch": 1.84, "learning_rate": 4.074208803201164e-05, "loss": 2.5645, "step": 10100 },
    { "epoch": 1.85, "learning_rate": 3.846853401236814e-05, "loss": 2.5493, "step": 10150 },
    { "epoch": 1.85, "learning_rate": 3.619497999272463e-05, "loss": 2.5531, "step": 10200 },
    { "epoch": 1.86, "learning_rate": 3.392142597308112e-05, "loss": 2.5537, "step": 10250 },
    { "epoch": 1.87, "learning_rate": 3.164787195343761e-05, "loss": 2.5428, "step": 10300 },
    { "epoch": 1.88, "learning_rate": 2.937431793379411e-05, "loss": 2.5534, "step": 10350 },
    { "epoch": 1.89, "learning_rate": 2.71007639141506e-05, "loss": 2.5462, "step": 10400 },
    { "epoch": 1.9, "learning_rate": 2.4827209894507094e-05, "loss": 2.5555, "step": 10450 },
    { "epoch": 1.91, "learning_rate": 2.2553655874863588e-05, "loss": 2.5622, "step": 10500 },
    { "epoch": 1.92, "learning_rate": 2.028010185522008e-05, "loss": 2.5599, "step": 10550 },
    { "epoch": 1.93, "learning_rate": 1.8006547835576576e-05, "loss": 2.5406, "step": 10600 },
    { "epoch": 1.94, "learning_rate": 1.5732993815933066e-05, "loss": 2.5642, "step": 10650 },
    { "epoch": 1.95, "learning_rate": 1.345943979628956e-05, "loss": 2.5589, "step": 10700 },
    { "epoch": 1.96, "learning_rate": 1.1185885776646052e-05, "loss": 2.5585, "step": 10750 },
    { "epoch": 1.96, "learning_rate": 8.912331757002546e-06, "loss": 2.555, "step": 10800 },
    { "epoch": 1.97, "learning_rate": 6.638777737359039e-06, "loss": 2.5504, "step": 10850 },
    { "epoch": 1.98, "learning_rate": 4.3652237177155325e-06, "loss": 2.5553, "step": 10900 },
    { "epoch": 1.99, "learning_rate": 2.0916696980720264e-06, "loss": 2.5582, "step": 10950 },
    { "epoch": 2.0, "step": 10996, "total_flos": 7.20906520790827e+17, "train_loss": 2.7760973889075786, "train_runtime": 9463.0789, "train_samples_per_second": 37.188, "train_steps_per_second": 1.162 }
  ],
| "logging_steps": 50, | |
| "max_steps": 10996, | |
| "num_train_epochs": 2, | |
| "save_steps": 2500, | |
| "total_flos": 7.20906520790827e+17, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |