{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.999259807549963,
  "eval_steps": 100,
  "global_step": 675,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007401924500370096,
      "grad_norm": 2.6709367750052144,
      "learning_rate": 1.4705882352941177e-06,
      "loss": 1.0786,
      "step": 5
    },
    {
      "epoch": 0.014803849000740192,
      "grad_norm": 2.2871070474818787,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 1.0902,
      "step": 10
    },
    {
      "epoch": 0.02220577350111029,
      "grad_norm": 1.4182795521636087,
      "learning_rate": 4.411764705882353e-06,
      "loss": 1.062,
      "step": 15
    },
    {
      "epoch": 0.029607698001480384,
      "grad_norm": 1.3764280404457665,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.0086,
      "step": 20
    },
    {
      "epoch": 0.037009622501850484,
      "grad_norm": 1.1156261369748648,
      "learning_rate": 7.352941176470589e-06,
      "loss": 0.9684,
      "step": 25
    },
    {
      "epoch": 0.04441154700222058,
      "grad_norm": 0.9545532440344103,
      "learning_rate": 8.823529411764707e-06,
      "loss": 0.9217,
      "step": 30
    },
    {
      "epoch": 0.05181347150259067,
      "grad_norm": 0.7611871553908859,
      "learning_rate": 1.0294117647058823e-05,
      "loss": 0.8858,
      "step": 35
    },
    {
      "epoch": 0.05921539600296077,
      "grad_norm": 0.6854484767024068,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 0.8631,
      "step": 40
    },
    {
      "epoch": 0.06661732050333087,
      "grad_norm": 0.750666680519459,
      "learning_rate": 1.323529411764706e-05,
      "loss": 0.8487,
      "step": 45
    },
    {
      "epoch": 0.07401924500370097,
      "grad_norm": 0.8172244317532135,
      "learning_rate": 1.4705882352941179e-05,
      "loss": 0.861,
      "step": 50
    },
    {
      "epoch": 0.08142116950407106,
      "grad_norm": 0.6258688920241917,
      "learning_rate": 1.6176470588235296e-05,
      "loss": 0.8545,
      "step": 55
    },
    {
      "epoch": 0.08882309400444116,
      "grad_norm": 0.7311294156696475,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.8293,
      "step": 60
    },
    {
      "epoch": 0.09622501850481126,
      "grad_norm": 0.6829997052596827,
      "learning_rate": 1.911764705882353e-05,
      "loss": 0.8259,
      "step": 65
    },
    {
      "epoch": 0.10362694300518134,
      "grad_norm": 0.769333158518139,
      "learning_rate": 1.9999464266898485e-05,
      "loss": 0.821,
      "step": 70
    },
    {
      "epoch": 0.11102886750555144,
      "grad_norm": 0.9661291332835485,
      "learning_rate": 1.9993437928712977e-05,
      "loss": 0.8164,
      "step": 75
    },
    {
      "epoch": 0.11843079200592153,
      "grad_norm": 0.8313345878160039,
      "learning_rate": 1.998071963486563e-05,
      "loss": 0.8061,
      "step": 80
    },
    {
      "epoch": 0.12583271650629163,
      "grad_norm": 0.6599625325000631,
      "learning_rate": 1.9961317901970953e-05,
      "loss": 0.7945,
      "step": 85
    },
    {
      "epoch": 0.13323464100666174,
      "grad_norm": 0.7556971029869796,
      "learning_rate": 1.993524572210807e-05,
      "loss": 0.7946,
      "step": 90
    },
    {
      "epoch": 0.14063656550703182,
      "grad_norm": 0.8000011001473601,
      "learning_rate": 1.990252055412077e-05,
      "loss": 0.7905,
      "step": 95
    },
    {
      "epoch": 0.14803849000740193,
      "grad_norm": 0.7843797614751532,
      "learning_rate": 1.9863164311926433e-05,
      "loss": 0.8172,
      "step": 100
    },
    {
      "epoch": 0.14803849000740193,
      "eval_loss": 0.8196982145309448,
      "eval_runtime": 7.0817,
      "eval_samples_per_second": 18.075,
      "eval_steps_per_second": 2.259,
      "step": 100
    },
    {
      "epoch": 0.15544041450777202,
      "grad_norm": 0.713372656728166,
      "learning_rate": 1.981720334984174e-05,
      "loss": 0.7921,
      "step": 105
    },
    {
      "epoch": 0.16284233900814213,
      "grad_norm": 0.7107830515856771,
      "learning_rate": 1.9764668444934853e-05,
      "loss": 0.7858,
      "step": 110
    },
    {
      "epoch": 0.1702442635085122,
      "grad_norm": 0.7210451915786353,
      "learning_rate": 1.970559477641606e-05,
      "loss": 0.7631,
      "step": 115
    },
    {
      "epoch": 0.17764618800888232,
      "grad_norm": 0.7328353666424424,
      "learning_rate": 1.9640021902080523e-05,
      "loss": 0.7929,
      "step": 120
    },
    {
      "epoch": 0.1850481125092524,
      "grad_norm": 0.625857980018156,
      "learning_rate": 1.9567993731818988e-05,
      "loss": 0.7916,
      "step": 125
    },
    {
      "epoch": 0.19245003700962252,
      "grad_norm": 0.7455061248180267,
      "learning_rate": 1.9489558498214197e-05,
      "loss": 0.7842,
      "step": 130
    },
    {
      "epoch": 0.1998519615099926,
      "grad_norm": 0.6529537900630853,
      "learning_rate": 1.9404768724242667e-05,
      "loss": 0.7703,
      "step": 135
    },
    {
      "epoch": 0.20725388601036268,
      "grad_norm": 0.7294370711408014,
      "learning_rate": 1.931368118810346e-05,
      "loss": 0.7947,
      "step": 140
    },
    {
      "epoch": 0.2146558105107328,
      "grad_norm": 0.7485205785730842,
      "learning_rate": 1.92163568851975e-05,
      "loss": 0.7756,
      "step": 145
    },
    {
      "epoch": 0.22205773501110287,
      "grad_norm": 0.8058314493735931,
      "learning_rate": 1.911286098728296e-05,
      "loss": 0.772,
      "step": 150
    },
    {
      "epoch": 0.22945965951147299,
      "grad_norm": 0.700914939266579,
      "learning_rate": 1.900326279883392e-05,
      "loss": 0.8014,
      "step": 155
    },
    {
      "epoch": 0.23686158401184307,
      "grad_norm": 0.7020963336241166,
      "learning_rate": 1.8887635710631716e-05,
      "loss": 0.8046,
      "step": 160
    },
    {
      "epoch": 0.24426350851221318,
      "grad_norm": 0.7005897864198626,
      "learning_rate": 1.8766057150619865e-05,
      "loss": 0.7775,
      "step": 165
    },
    {
      "epoch": 0.25166543301258326,
      "grad_norm": 0.7058185749313685,
      "learning_rate": 1.8638608532055635e-05,
      "loss": 0.7947,
      "step": 170
    },
    {
      "epoch": 0.25906735751295334,
      "grad_norm": 0.664038868478026,
      "learning_rate": 1.8505375198992856e-05,
      "loss": 0.7831,
      "step": 175
    },
    {
      "epoch": 0.2664692820133235,
      "grad_norm": 0.72014538282932,
      "learning_rate": 1.836644636913258e-05,
      "loss": 0.7542,
      "step": 180
    },
    {
      "epoch": 0.27387120651369357,
      "grad_norm": 0.7770026346024808,
      "learning_rate": 1.8221915074079764e-05,
      "loss": 0.7779,
      "step": 185
    },
    {
      "epoch": 0.28127313101406365,
      "grad_norm": 0.865663637067605,
      "learning_rate": 1.8071878097046064e-05,
      "loss": 0.7563,
      "step": 190
    },
    {
      "epoch": 0.28867505551443373,
      "grad_norm": 0.6184476258823719,
      "learning_rate": 1.7916435908040413e-05,
      "loss": 0.7724,
      "step": 195
    },
    {
      "epoch": 0.29607698001480387,
      "grad_norm": 0.6494917102142564,
      "learning_rate": 1.7755692596590778e-05,
      "loss": 0.7746,
      "step": 200
    },
    {
      "epoch": 0.29607698001480387,
      "eval_loss": 0.7898926138877869,
      "eval_runtime": 7.0802,
      "eval_samples_per_second": 18.079,
      "eval_steps_per_second": 2.26,
      "step": 200
    },
    {
      "epoch": 0.30347890451517395,
      "grad_norm": 0.6973010604211403,
      "learning_rate": 1.7589755802042188e-05,
      "loss": 0.773,
      "step": 205
    },
    {
      "epoch": 0.31088082901554404,
      "grad_norm": 0.6949606493395256,
      "learning_rate": 1.7418736641477636e-05,
      "loss": 0.7561,
      "step": 210
    },
    {
      "epoch": 0.3182827535159141,
      "grad_norm": 0.6262569109444988,
      "learning_rate": 1.7242749635310222e-05,
      "loss": 0.758,
      "step": 215
    },
    {
      "epoch": 0.32568467801628426,
      "grad_norm": 0.6545508953720698,
      "learning_rate": 1.7061912630596252e-05,
      "loss": 0.7604,
      "step": 220
    },
    {
      "epoch": 0.33308660251665434,
      "grad_norm": 0.7178597609986006,
      "learning_rate": 1.6876346722120747e-05,
      "loss": 0.7753,
      "step": 225
    },
    {
      "epoch": 0.3404885270170244,
      "grad_norm": 0.6912584764916417,
      "learning_rate": 1.6686176171308125e-05,
      "loss": 0.7977,
      "step": 230
    },
    {
      "epoch": 0.3478904515173945,
      "grad_norm": 0.6748238681034779,
      "learning_rate": 1.6491528323012412e-05,
      "loss": 0.7595,
      "step": 235
    },
    {
      "epoch": 0.35529237601776464,
      "grad_norm": 0.6568455110309562,
      "learning_rate": 1.6292533520242663e-05,
      "loss": 0.7624,
      "step": 240
    },
    {
      "epoch": 0.3626943005181347,
      "grad_norm": 0.6696394227198411,
      "learning_rate": 1.6089325016880737e-05,
      "loss": 0.7533,
      "step": 245
    },
    {
      "epoch": 0.3700962250185048,
      "grad_norm": 0.6863856043690244,
      "learning_rate": 1.588203888844982e-05,
      "loss": 0.7684,
      "step": 250
    },
    {
      "epoch": 0.3774981495188749,
      "grad_norm": 0.6413183265768512,
      "learning_rate": 1.5670813940993504e-05,
      "loss": 0.7409,
      "step": 255
    },
    {
      "epoch": 0.38490007401924503,
      "grad_norm": 0.592394054979568,
      "learning_rate": 1.5455791618126407e-05,
      "loss": 0.7336,
      "step": 260
    },
    {
      "epoch": 0.3923019985196151,
      "grad_norm": 0.6278449742084541,
      "learning_rate": 1.5237115906318565e-05,
      "loss": 0.7572,
      "step": 265
    },
    {
      "epoch": 0.3997039230199852,
      "grad_norm": 0.6678910847724688,
      "learning_rate": 1.5014933238477069e-05,
      "loss": 0.7379,
      "step": 270
    },
    {
      "epoch": 0.4071058475203553,
      "grad_norm": 0.6936079894912379,
      "learning_rate": 1.4789392395889468e-05,
      "loss": 0.7633,
      "step": 275
    },
    {
      "epoch": 0.41450777202072536,
      "grad_norm": 0.6427795800840255,
      "learning_rate": 1.4560644408594602e-05,
      "loss": 0.7442,
      "step": 280
    },
    {
      "epoch": 0.4219096965210955,
      "grad_norm": 0.6507557085189994,
      "learning_rate": 1.432884245424761e-05,
      "loss": 0.7556,
      "step": 285
    },
    {
      "epoch": 0.4293116210214656,
      "grad_norm": 0.6624582442770054,
      "learning_rate": 1.4094141755546816e-05,
      "loss": 0.7833,
      "step": 290
    },
    {
      "epoch": 0.43671354552183567,
      "grad_norm": 0.6260844530638364,
      "learning_rate": 1.3856699476291176e-05,
      "loss": 0.7429,
      "step": 295
    },
    {
      "epoch": 0.44411547002220575,
      "grad_norm": 0.6817749702455657,
      "learning_rate": 1.3616674616137902e-05,
      "loss": 0.7644,
      "step": 300
    },
    {
      "epoch": 0.44411547002220575,
      "eval_loss": 0.7755764722824097,
      "eval_runtime": 7.0841,
      "eval_samples_per_second": 18.069,
      "eval_steps_per_second": 2.259,
      "step": 300
    },
    {
      "epoch": 0.4515173945225759,
      "grad_norm": 0.6557824164506009,
      "learning_rate": 1.3374227904130724e-05,
      "loss": 0.755,
      "step": 305
    },
    {
      "epoch": 0.45891931902294597,
      "grad_norm": 0.6429161176286008,
      "learning_rate": 1.3129521691070108e-05,
      "loss": 0.7327,
      "step": 310
    },
    {
      "epoch": 0.46632124352331605,
      "grad_norm": 0.7172336735771104,
      "learning_rate": 1.2882719840797473e-05,
      "loss": 0.7514,
      "step": 315
    },
    {
      "epoch": 0.47372316802368614,
      "grad_norm": 0.6888969569785534,
      "learning_rate": 1.2633987620466229e-05,
      "loss": 0.7353,
      "step": 320
    },
    {
      "epoch": 0.4811250925240563,
      "grad_norm": 0.6341647872044919,
      "learning_rate": 1.2383491589873122e-05,
      "loss": 0.7406,
      "step": 325
    },
    {
      "epoch": 0.48852701702442636,
      "grad_norm": 0.6076464907196722,
      "learning_rate": 1.213139948992394e-05,
      "loss": 0.7498,
      "step": 330
    },
    {
      "epoch": 0.49592894152479644,
      "grad_norm": 0.7070131265743488,
      "learning_rate": 1.187788013030837e-05,
      "loss": 0.7469,
      "step": 335
    },
    {
      "epoch": 0.5033308660251665,
      "grad_norm": 0.6137043611077291,
      "learning_rate": 1.1623103276459086e-05,
      "loss": 0.7507,
      "step": 340
    },
    {
      "epoch": 0.5107327905255367,
      "grad_norm": 0.6456952973092422,
      "learning_rate": 1.1367239535870913e-05,
      "loss": 0.7426,
      "step": 345
    },
    {
      "epoch": 0.5181347150259067,
      "grad_norm": 0.6874472223982817,
      "learning_rate": 1.1110460243856051e-05,
      "loss": 0.7302,
      "step": 350
    },
    {
      "epoch": 0.5255366395262768,
      "grad_norm": 0.6585897080191178,
      "learning_rate": 1.085293734881197e-05,
      "loss": 0.7469,
      "step": 355
    },
    {
      "epoch": 0.532938564026647,
      "grad_norm": 0.597859239299925,
      "learning_rate": 1.0594843297078736e-05,
      "loss": 0.766,
      "step": 360
    },
    {
      "epoch": 0.540340488527017,
      "grad_norm": 0.6867943839454653,
      "learning_rate": 1.0336350917462925e-05,
      "loss": 0.7558,
      "step": 365
    },
    {
      "epoch": 0.5477424130273871,
      "grad_norm": 0.6059202493597124,
      "learning_rate": 1.0077633305505402e-05,
      "loss": 0.7434,
      "step": 370
    },
    {
      "epoch": 0.5551443375277573,
      "grad_norm": 0.6312185196007014,
      "learning_rate": 9.818863707570476e-06,
      "loss": 0.7608,
      "step": 375
    },
    {
      "epoch": 0.5625462620281273,
      "grad_norm": 0.6583669975631296,
      "learning_rate": 9.560215404834094e-06,
      "loss": 0.7516,
      "step": 380
    },
    {
      "epoch": 0.5699481865284974,
      "grad_norm": 0.6337654329864656,
      "learning_rate": 9.30186159724869e-06,
      "loss": 0.7145,
      "step": 385
    },
    {
      "epoch": 0.5773501110288675,
      "grad_norm": 0.6063835380133286,
      "learning_rate": 9.043975287562443e-06,
      "loss": 0.747,
      "step": 390
    },
    {
      "epoch": 0.5847520355292376,
      "grad_norm": 0.6801968160814837,
      "learning_rate": 8.786729165470584e-06,
      "loss": 0.7254,
      "step": 395
    },
    {
      "epoch": 0.5921539600296077,
      "grad_norm": 0.6884878033137862,
      "learning_rate": 8.530295491976338e-06,
      "loss": 0.7308,
      "step": 400
    },
    {
      "epoch": 0.5921539600296077,
      "eval_loss": 0.7637947797775269,
      "eval_runtime": 7.0881,
      "eval_samples_per_second": 18.059,
      "eval_steps_per_second": 2.257,
      "step": 400
    },
    {
      "epoch": 0.5995558845299778,
      "grad_norm": 0.5905316945269669,
      "learning_rate": 8.274845984038916e-06,
      "loss": 0.7173,
      "step": 405
    },
    {
      "epoch": 0.6069578090303479,
      "grad_norm": 0.652102126972111,
      "learning_rate": 8.020551699585843e-06,
      "loss": 0.7469,
      "step": 410
    },
    {
      "epoch": 0.6143597335307179,
      "grad_norm": 0.6046273530879036,
      "learning_rate": 7.76758292296659e-06,
      "loss": 0.7265,
      "step": 415
    },
    {
      "epoch": 0.6217616580310881,
      "grad_norm": 0.6506612501289583,
      "learning_rate": 7.5161090509242005e-06,
      "loss": 0.742,
      "step": 420
    },
    {
      "epoch": 0.6291635825314582,
      "grad_norm": 0.6445503530456065,
      "learning_rate": 7.2662984791613186e-06,
      "loss": 0.7345,
      "step": 425
    },
    {
      "epoch": 0.6365655070318282,
      "grad_norm": 0.6524566274539656,
      "learning_rate": 7.01831848957653e-06,
      "loss": 0.749,
      "step": 430
    },
    {
      "epoch": 0.6439674315321984,
      "grad_norm": 0.6006427158853699,
      "learning_rate": 6.772335138246548e-06,
      "loss": 0.7469,
      "step": 435
    },
    {
      "epoch": 0.6513693560325685,
      "grad_norm": 0.6253883357593474,
      "learning_rate": 6.528513144229256e-06,
      "loss": 0.7427,
      "step": 440
    },
    {
      "epoch": 0.6587712805329385,
      "grad_norm": 0.6149011526684693,
      "learning_rate": 6.287015779262064e-06,
      "loss": 0.7489,
      "step": 445
    },
    {
      "epoch": 0.6661732050333087,
      "grad_norm": 0.655303715198759,
      "learning_rate": 6.048004758429451e-06,
      "loss": 0.7275,
      "step": 450
    },
    {
      "epoch": 0.6735751295336787,
      "grad_norm": 0.60174347779124,
      "learning_rate": 5.811640131872867e-06,
      "loss": 0.7498,
      "step": 455
    },
    {
      "epoch": 0.6809770540340488,
      "grad_norm": 0.6063626940584474,
      "learning_rate": 5.578080177615575e-06,
      "loss": 0.7201,
      "step": 460
    },
    {
      "epoch": 0.688378978534419,
      "grad_norm": 0.6218180992736176,
      "learning_rate": 5.347481295574141e-06,
      "loss": 0.7172,
      "step": 465
    },
    {
      "epoch": 0.695780903034789,
      "grad_norm": 0.6109254076692403,
      "learning_rate": 5.119997902827584e-06,
      "loss": 0.7287,
      "step": 470
    },
    {
      "epoch": 0.7031828275351591,
      "grad_norm": 0.6117825683298314,
      "learning_rate": 4.8957823302142916e-06,
      "loss": 0.7355,
      "step": 475
    },
    {
      "epoch": 0.7105847520355293,
      "grad_norm": 0.5644874141920769,
      "learning_rate": 4.674984720325961e-06,
      "loss": 0.7208,
      "step": 480
    },
    {
      "epoch": 0.7179866765358993,
      "grad_norm": 0.5866643065055667,
      "learning_rate": 4.457752926966888e-06,
      "loss": 0.7151,
      "step": 485
    },
    {
      "epoch": 0.7253886010362695,
      "grad_norm": 0.5815137410742766,
      "learning_rate": 4.244232416145839e-06,
      "loss": 0.7337,
      "step": 490
    },
    {
      "epoch": 0.7327905255366395,
      "grad_norm": 0.5893669437648787,
      "learning_rate": 4.0345661686669745e-06,
      "loss": 0.7269,
      "step": 495
    },
    {
      "epoch": 0.7401924500370096,
      "grad_norm": 0.6231119397765088,
      "learning_rate": 3.828894584384867e-06,
      "loss": 0.7355,
      "step": 500
    },
    {
      "epoch": 0.7401924500370096,
      "eval_loss": 0.7561960220336914,
      "eval_runtime": 7.0784,
      "eval_samples_per_second": 18.083,
      "eval_steps_per_second": 2.26,
      "step": 500
    },
    {
      "epoch": 0.7475943745373798,
      "grad_norm": 0.5547460159002005,
      "learning_rate": 3.62735538818787e-06,
      "loss": 0.7196,
      "step": 505
    },
    {
      "epoch": 0.7549962990377498,
      "grad_norm": 0.6311442463920164,
      "learning_rate": 3.4300835377726904e-06,
      "loss": 0.7234,
      "step": 510
    },
    {
      "epoch": 0.7623982235381199,
      "grad_norm": 0.6161479513086646,
      "learning_rate": 3.2372111332720045e-06,
      "loss": 0.7588,
      "step": 515
    },
    {
      "epoch": 0.7698001480384901,
      "grad_norm": 0.6108773239475987,
      "learning_rate": 3.048867328795588e-06,
      "loss": 0.7157,
      "step": 520
    },
    {
      "epoch": 0.7772020725388601,
      "grad_norm": 0.602925688413523,
      "learning_rate": 2.865178245944218e-06,
      "loss": 0.7145,
      "step": 525
    },
    {
      "epoch": 0.7846039970392302,
      "grad_norm": 0.5701021420362337,
      "learning_rate": 2.686266889354211e-06,
      "loss": 0.7375,
      "step": 530
    },
    {
      "epoch": 0.7920059215396003,
      "grad_norm": 0.5887011145957908,
      "learning_rate": 2.5122530643292274e-06,
      "loss": 0.7429,
      "step": 535
    },
    {
      "epoch": 0.7994078460399704,
      "grad_norm": 0.6312752747025913,
      "learning_rate": 2.3432532966144526e-06,
      "loss": 0.7323,
      "step": 540
    },
    {
      "epoch": 0.8068097705403405,
      "grad_norm": 0.5823788876767197,
      "learning_rate": 2.1793807543668857e-06,
      "loss": 0.734,
      "step": 545
    },
    {
      "epoch": 0.8142116950407106,
      "grad_norm": 0.5500197273786551,
      "learning_rate": 2.0207451723739633e-06,
      "loss": 0.7258,
      "step": 550
    },
    {
      "epoch": 0.8216136195410807,
      "grad_norm": 0.5605928225621666,
      "learning_rate": 1.8674527785713247e-06,
      "loss": 0.7323,
      "step": 555
    },
    {
      "epoch": 0.8290155440414507,
      "grad_norm": 0.6153548429599589,
      "learning_rate": 1.7196062229088606e-06,
      "loss": 0.6996,
      "step": 560
    },
    {
      "epoch": 0.8364174685418209,
      "grad_norm": 0.6241569667891514,
      "learning_rate": 1.577304508612717e-06,
      "loss": 0.7298,
      "step": 565
    },
    {
      "epoch": 0.843819393042191,
      "grad_norm": 0.5193118067897788,
      "learning_rate": 1.4406429258892762e-06,
      "loss": 0.7503,
      "step": 570
    },
    {
      "epoch": 0.851221317542561,
      "grad_norm": 0.6129769104488084,
      "learning_rate": 1.3097129881154936e-06,
      "loss": 0.72,
      "step": 575
    },
    {
      "epoch": 0.8586232420429312,
      "grad_norm": 0.5835803211279808,
      "learning_rate": 1.1846023705583442e-06,
      "loss": 0.7164,
      "step": 580
    },
    {
      "epoch": 0.8660251665433013,
      "grad_norm": 0.5805836101347949,
      "learning_rate": 1.065394851664394e-06,
      "loss": 0.7345,
      "step": 585
    },
    {
      "epoch": 0.8734270910436713,
      "grad_norm": 0.5488458370278894,
      "learning_rate": 9.521702569588199e-07,
      "loss": 0.7537,
      "step": 590
    },
    {
      "epoch": 0.8808290155440415,
      "grad_norm": 0.574169045086665,
      "learning_rate": 8.450044055914497e-07,
      "loss": 0.7221,
      "step": 595
    },
    {
      "epoch": 0.8882309400444115,
      "grad_norm": 0.5603143384229617,
      "learning_rate": 7.439690595656013e-07,
      "loss": 0.7446,
      "step": 600
    },
    {
      "epoch": 0.8882309400444115,
      "eval_loss": 0.753212571144104,
      "eval_runtime": 7.0895,
      "eval_samples_per_second": 18.055,
      "eval_steps_per_second": 2.257,
      "step": 600
    },
    {
      "epoch": 0.8956328645447816,
      "grad_norm": 0.6329369985344253,
      "learning_rate": 6.491318756837417e-07,
      "loss": 0.7299,
      "step": 605
    },
    {
      "epoch": 0.9030347890451518,
      "grad_norm": 0.512657762146579,
      "learning_rate": 5.605563602421149e-07,
      "loss": 0.7059,
      "step": 610
    },
    {
      "epoch": 0.9104367135455218,
      "grad_norm": 0.555802795710424,
      "learning_rate": 4.783018265047179e-07,
      "loss": 0.7556,
      "step": 615
    },
    {
      "epoch": 0.9178386380458919,
      "grad_norm": 0.5850038916370969,
      "learning_rate": 4.024233549850509e-07,
      "loss": 0.7437,
      "step": 620
    },
    {
      "epoch": 0.9252405625462621,
      "grad_norm": 0.5486195860835535,
      "learning_rate": 3.329717565622825e-07,
      "loss": 0.7404,
      "step": 625
    },
    {
      "epoch": 0.9326424870466321,
      "grad_norm": 0.5514548625215937,
      "learning_rate": 2.6999353845651113e-07,
      "loss": 0.7241,
      "step": 630
    },
    {
      "epoch": 0.9400444115470022,
      "grad_norm": 0.5878396775004657,
      "learning_rate": 2.1353087308590314e-07,
      "loss": 0.739,
      "step": 635
    },
    {
      "epoch": 0.9474463360473723,
      "grad_norm": 0.5583557984778206,
      "learning_rate": 1.6362156982656085e-07,
      "loss": 0.7291,
      "step": 640
    },
    {
      "epoch": 0.9548482605477424,
      "grad_norm": 0.5235524601826119,
      "learning_rate": 1.2029904969404482e-07,
      "loss": 0.7127,
      "step": 645
    },
    {
      "epoch": 0.9622501850481125,
      "grad_norm": 0.6023587868318137,
      "learning_rate": 8.359232296349163e-08,
      "loss": 0.7165,
      "step": 650
    },
    {
      "epoch": 0.9696521095484826,
      "grad_norm": 0.5670622600030921,
      "learning_rate": 5.3525969743324356e-08,
      "loss": 0.7321,
      "step": 655
    },
    {
      "epoch": 0.9770540340488527,
      "grad_norm": 0.5473289868827732,
      "learning_rate": 3.012012351554017e-08,
      "loss": 0.7065,
      "step": 660
    },
    {
      "epoch": 0.9844559585492227,
      "grad_norm": 0.5803068194369324,
      "learning_rate": 1.3390457653639221e-08,
      "loss": 0.7354,
      "step": 665
    },
    {
      "epoch": 0.9918578830495929,
      "grad_norm": 0.641603033929549,
      "learning_rate": 3.3481749271768726e-09,
      "loss": 0.7463,
      "step": 670
    },
    {
      "epoch": 0.999259807549963,
      "grad_norm": 0.5695984279665459,
      "learning_rate": 0.0,
      "loss": 0.7159,
      "step": 675
    },
    {
      "epoch": 0.999259807549963,
      "step": 675,
      "total_flos": 76888336760832.0,
      "train_loss": 0.7676166036393908,
      "train_runtime": 4800.2526,
      "train_samples_per_second": 4.502,
      "train_steps_per_second": 0.141
    }
  ],
  "logging_steps": 5,
  "max_steps": 675,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 76888336760832.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}