{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 372,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013458950201884253,
      "grad_norm": 1.00712251663208,
      "learning_rate": 1.2903225806451614e-06,
      "loss": 1.264,
      "step": 5
    },
    {
      "epoch": 0.026917900403768506,
      "grad_norm": 0.9430832266807556,
      "learning_rate": 2.9032258064516128e-06,
      "loss": 1.2378,
      "step": 10
    },
    {
      "epoch": 0.040376850605652756,
      "grad_norm": 0.6684543490409851,
      "learning_rate": 4.516129032258065e-06,
      "loss": 1.239,
      "step": 15
    },
    {
      "epoch": 0.05383580080753701,
      "grad_norm": 0.7191868424415588,
      "learning_rate": 6.129032258064517e-06,
      "loss": 1.2218,
      "step": 20
    },
    {
      "epoch": 0.06729475100942127,
      "grad_norm": 0.6867708563804626,
      "learning_rate": 7.741935483870968e-06,
      "loss": 1.2301,
      "step": 25
    },
    {
      "epoch": 0.08075370121130551,
      "grad_norm": 0.6379647254943848,
      "learning_rate": 9.35483870967742e-06,
      "loss": 1.2194,
      "step": 30
    },
    {
      "epoch": 0.09421265141318977,
      "grad_norm": 0.5798360109329224,
      "learning_rate": 1.0967741935483872e-05,
      "loss": 1.2002,
      "step": 35
    },
    {
      "epoch": 0.10767160161507403,
      "grad_norm": 0.5719746351242065,
      "learning_rate": 1.2580645161290324e-05,
      "loss": 1.1982,
      "step": 40
    },
    {
      "epoch": 0.12113055181695828,
      "grad_norm": 0.5450358390808105,
      "learning_rate": 1.4193548387096776e-05,
      "loss": 1.1986,
      "step": 45
    },
    {
      "epoch": 0.13458950201884254,
      "grad_norm": 0.5413178205490112,
      "learning_rate": 1.5806451612903226e-05,
      "loss": 1.1955,
      "step": 50
    },
    {
      "epoch": 0.1480484522207268,
      "grad_norm": 0.5677768588066101,
      "learning_rate": 1.741935483870968e-05,
      "loss": 1.1496,
      "step": 55
    },
    {
      "epoch": 0.16150740242261102,
      "grad_norm": 0.5705103278160095,
      "learning_rate": 1.903225806451613e-05,
      "loss": 1.1377,
      "step": 60
    },
    {
      "epoch": 0.17496635262449528,
      "grad_norm": 0.6191664934158325,
      "learning_rate": 2.0645161290322582e-05,
      "loss": 1.1242,
      "step": 65
    },
    {
      "epoch": 0.18842530282637954,
      "grad_norm": 0.41417160630226135,
      "learning_rate": 2.2258064516129034e-05,
      "loss": 1.0923,
      "step": 70
    },
    {
      "epoch": 0.2018842530282638,
      "grad_norm": 0.5877333283424377,
      "learning_rate": 2.3870967741935483e-05,
      "loss": 1.1575,
      "step": 75
    },
    {
      "epoch": 0.21534320323014805,
      "grad_norm": 0.5006933808326721,
      "learning_rate": 2.548387096774194e-05,
      "loss": 1.0873,
      "step": 80
    },
    {
      "epoch": 0.2288021534320323,
      "grad_norm": 0.5454208254814148,
      "learning_rate": 2.7096774193548387e-05,
      "loss": 1.073,
      "step": 85
    },
    {
      "epoch": 0.24226110363391656,
      "grad_norm": 0.4708370268344879,
      "learning_rate": 2.870967741935484e-05,
      "loss": 1.1246,
      "step": 90
    },
    {
      "epoch": 0.2557200538358008,
      "grad_norm": 0.7446239590644836,
      "learning_rate": 2.9999976292388853e-05,
      "loss": 1.0893,
      "step": 95
    },
    {
      "epoch": 0.2691790040376851,
      "grad_norm": 0.5205251574516296,
      "learning_rate": 2.999914653386729e-05,
      "loss": 1.0233,
      "step": 100
    },
    {
      "epoch": 0.28263795423956933,
      "grad_norm": 0.5542618632316589,
      "learning_rate": 2.9997131469727287e-05,
      "loss": 1.0534,
      "step": 105
    },
    {
      "epoch": 0.2960969044414536,
      "grad_norm": 0.5413870811462402,
      "learning_rate": 2.9993931259209015e-05,
      "loss": 1.0217,
      "step": 110
    },
    {
      "epoch": 0.30955585464333785,
      "grad_norm": 0.5619310736656189,
      "learning_rate": 2.998954615520871e-05,
      "loss": 1.0357,
      "step": 115
    },
    {
      "epoch": 0.32301480484522205,
      "grad_norm": 0.5382521152496338,
      "learning_rate": 2.998397650425863e-05,
      "loss": 1.0228,
      "step": 120
    },
    {
      "epoch": 0.3364737550471063,
      "grad_norm": 0.6172555088996887,
      "learning_rate": 2.997722274649974e-05,
      "loss": 0.9711,
      "step": 125
    },
    {
      "epoch": 0.34993270524899056,
      "grad_norm": 0.6441250443458557,
      "learning_rate": 2.9969285415646874e-05,
      "loss": 0.9977,
      "step": 130
    },
    {
      "epoch": 0.3633916554508748,
      "grad_norm": 0.5927826166152954,
      "learning_rate": 2.9960165138946572e-05,
      "loss": 0.9778,
      "step": 135
    },
    {
      "epoch": 0.3768506056527591,
      "grad_norm": 0.6550005078315735,
      "learning_rate": 2.9949862637127523e-05,
      "loss": 0.9335,
      "step": 140
    },
    {
      "epoch": 0.39030955585464333,
      "grad_norm": 0.5886467099189758,
      "learning_rate": 2.9938378724343604e-05,
      "loss": 0.9681,
      "step": 145
    },
    {
      "epoch": 0.4037685060565276,
      "grad_norm": 0.7246305346488953,
      "learning_rate": 2.992571430810954e-05,
      "loss": 0.9701,
      "step": 150
    },
    {
      "epoch": 0.41722745625841184,
      "grad_norm": 0.7467737197875977,
      "learning_rate": 2.9911870389229192e-05,
      "loss": 0.9141,
      "step": 155
    },
    {
      "epoch": 0.4306864064602961,
      "grad_norm": 0.7680078744888306,
      "learning_rate": 2.9896848061716456e-05,
      "loss": 0.8685,
      "step": 160
    },
    {
      "epoch": 0.44414535666218036,
      "grad_norm": 0.7801365256309509,
      "learning_rate": 2.9880648512708834e-05,
      "loss": 0.9138,
      "step": 165
    },
    {
      "epoch": 0.4576043068640646,
      "grad_norm": 0.6462291479110718,
      "learning_rate": 2.9863273022373585e-05,
      "loss": 0.9223,
      "step": 170
    },
    {
      "epoch": 0.47106325706594887,
      "grad_norm": 0.6398594975471497,
      "learning_rate": 2.9844722963806592e-05,
      "loss": 0.9021,
      "step": 175
    },
    {
      "epoch": 0.4845222072678331,
      "grad_norm": 0.683367133140564,
      "learning_rate": 2.9824999802923846e-05,
      "loss": 0.8599,
      "step": 180
    },
    {
      "epoch": 0.4979811574697174,
      "grad_norm": 0.7672361731529236,
      "learning_rate": 2.9804105098345575e-05,
      "loss": 0.9282,
      "step": 185
    },
    {
      "epoch": 0.5114401076716016,
      "grad_norm": 0.6266528367996216,
      "learning_rate": 2.9782040501273126e-05,
      "loss": 0.8664,
      "step": 190
    },
    {
      "epoch": 0.5248990578734859,
      "grad_norm": 0.7748366594314575,
      "learning_rate": 2.975880775535843e-05,
      "loss": 0.8588,
      "step": 195
    },
    {
      "epoch": 0.5383580080753702,
      "grad_norm": 0.892792820930481,
      "learning_rate": 2.9734408696566224e-05,
      "loss": 0.8517,
      "step": 200
    },
    {
      "epoch": 0.5518169582772544,
      "grad_norm": 0.8755563497543335,
      "learning_rate": 2.9708845253028988e-05,
      "loss": 0.9136,
      "step": 205
    },
    {
      "epoch": 0.5652759084791387,
      "grad_norm": 0.7859370708465576,
      "learning_rate": 2.9682119444894545e-05,
      "loss": 0.7955,
      "step": 210
    },
    {
      "epoch": 0.5787348586810229,
      "grad_norm": 0.7947808504104614,
      "learning_rate": 2.9654233384166435e-05,
      "loss": 0.8695,
      "step": 215
    },
    {
      "epoch": 0.5921938088829072,
      "grad_norm": 0.7226110696792603,
      "learning_rate": 2.9625189274537004e-05,
      "loss": 0.7672,
      "step": 220
    },
    {
      "epoch": 0.6056527590847914,
      "grad_norm": 1.0677162408828735,
      "learning_rate": 2.9594989411213266e-05,
      "loss": 0.816,
      "step": 225
    },
    {
      "epoch": 0.6191117092866757,
      "grad_norm": 0.8201079964637756,
      "learning_rate": 2.9563636180735524e-05,
      "loss": 0.7877,
      "step": 230
    },
    {
      "epoch": 0.6325706594885598,
      "grad_norm": 0.961162805557251,
      "learning_rate": 2.9531132060788763e-05,
      "loss": 0.7566,
      "step": 235
    },
    {
      "epoch": 0.6460296096904441,
      "grad_norm": 0.9038267731666565,
      "learning_rate": 2.9497479620006873e-05,
      "loss": 0.8061,
      "step": 240
    },
    {
      "epoch": 0.6594885598923284,
      "grad_norm": 0.9068340063095093,
      "learning_rate": 2.9462681517769638e-05,
      "loss": 0.787,
      "step": 245
    },
    {
      "epoch": 0.6729475100942126,
      "grad_norm": 0.8361156582832336,
      "learning_rate": 2.94267405039926e-05,
      "loss": 0.7835,
      "step": 250
    },
    {
      "epoch": 0.6864064602960969,
      "grad_norm": 0.8174508213996887,
      "learning_rate": 2.938965941890972e-05,
      "loss": 0.7641,
      "step": 255
    },
    {
      "epoch": 0.6998654104979811,
      "grad_norm": 0.8101845383644104,
      "learning_rate": 2.9351441192848972e-05,
      "loss": 0.76,
      "step": 260
    },
    {
      "epoch": 0.7133243606998654,
      "grad_norm": 0.8047847747802734,
      "learning_rate": 2.9312088846000733e-05,
      "loss": 0.7404,
      "step": 265
    },
    {
      "epoch": 0.7267833109017496,
      "grad_norm": 0.8787263035774231,
      "learning_rate": 2.927160548817913e-05,
      "loss": 0.7335,
      "step": 270
    },
    {
      "epoch": 0.7402422611036339,
      "grad_norm": 0.9566526412963867,
      "learning_rate": 2.9229994318576295e-05,
      "loss": 0.7546,
      "step": 275
    },
    {
      "epoch": 0.7537012113055181,
      "grad_norm": 0.9340652227401733,
      "learning_rate": 2.9187258625509518e-05,
      "loss": 0.6814,
      "step": 280
    },
    {
      "epoch": 0.7671601615074024,
      "grad_norm": 1.0249980688095093,
      "learning_rate": 2.9143401786161445e-05,
      "loss": 0.7116,
      "step": 285
    },
    {
      "epoch": 0.7806191117092867,
      "grad_norm": 0.8947973847389221,
      "learning_rate": 2.9098427266313138e-05,
      "loss": 0.6845,
      "step": 290
    },
    {
      "epoch": 0.7940780619111709,
      "grad_norm": 0.9918004274368286,
      "learning_rate": 2.9052338620070215e-05,
      "loss": 0.7287,
      "step": 295
    },
    {
      "epoch": 0.8075370121130552,
      "grad_norm": 0.9002223610877991,
      "learning_rate": 2.9005139489582002e-05,
      "loss": 0.7129,
      "step": 300
    },
    {
      "epoch": 0.8209959623149394,
      "grad_norm": 0.8825509548187256,
      "learning_rate": 2.8956833604753688e-05,
      "loss": 0.6633,
      "step": 305
    },
    {
      "epoch": 0.8344549125168237,
      "grad_norm": 0.8505289554595947,
      "learning_rate": 2.8907424782951587e-05,
      "loss": 0.6826,
      "step": 310
    },
    {
      "epoch": 0.847913862718708,
      "grad_norm": 1.0375418663024902,
      "learning_rate": 2.8856916928701467e-05,
      "loss": 0.7003,
      "step": 315
    },
    {
      "epoch": 0.8613728129205922,
      "grad_norm": 1.0597079992294312,
      "learning_rate": 2.8805314033379985e-05,
      "loss": 0.7151,
      "step": 320
    },
    {
      "epoch": 0.8748317631224765,
      "grad_norm": 1.1800124645233154,
      "learning_rate": 2.8752620174899284e-05,
      "loss": 0.6594,
      "step": 325
    },
    {
      "epoch": 0.8882907133243607,
      "grad_norm": 0.9276929497718811,
      "learning_rate": 2.8698839517384726e-05,
      "loss": 0.651,
      "step": 330
    },
    {
      "epoch": 0.901749663526245,
      "grad_norm": 1.094962239265442,
      "learning_rate": 2.8643976310845828e-05,
      "loss": 0.6412,
      "step": 335
    },
    {
      "epoch": 0.9152086137281292,
      "grad_norm": 0.9263754487037659,
      "learning_rate": 2.8588034890840408e-05,
      "loss": 0.6269,
      "step": 340
    },
    {
      "epoch": 0.9286675639300135,
      "grad_norm": 0.9816908240318298,
      "learning_rate": 2.8531019678131952e-05,
      "loss": 0.6577,
      "step": 345
    },
    {
      "epoch": 0.9421265141318977,
      "grad_norm": 1.0188283920288086,
      "learning_rate": 2.847293517834029e-05,
      "loss": 0.66,
      "step": 350
    },
    {
      "epoch": 0.955585464333782,
      "grad_norm": 1.0503355264663696,
      "learning_rate": 2.8413785981585518e-05,
      "loss": 0.621,
      "step": 355
    },
    {
      "epoch": 0.9690444145356663,
      "grad_norm": 0.9092134237289429,
      "learning_rate": 2.8353576762125274e-05,
      "loss": 0.6343,
      "step": 360
    },
    {
      "epoch": 0.9825033647375505,
      "grad_norm": 0.9565970301628113,
      "learning_rate": 2.8292312277985354e-05,
      "loss": 0.6538,
      "step": 365
    },
    {
      "epoch": 0.9959623149394348,
      "grad_norm": 1.014952301979065,
      "learning_rate": 2.82299973705837e-05,
      "loss": 0.6081,
      "step": 370
    }
  ],
  "logging_steps": 5,
  "max_steps": 1860,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.29235739645313e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}