{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.999259807549963,
  "eval_steps": 100,
  "global_step": 675,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007401924500370096,
      "grad_norm": 0.6677651277112562,
      "learning_rate": 1.4705882352941177e-06,
      "loss": 1.0786,
      "step": 5
    },
    {
      "epoch": 0.014803849000740192,
      "grad_norm": 0.5722064349275372,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 1.0903,
      "step": 10
    },
    {
      "epoch": 0.02220577350111029,
      "grad_norm": 0.35990263606190365,
      "learning_rate": 4.411764705882353e-06,
      "loss": 1.0631,
      "step": 15
    },
    {
      "epoch": 0.029607698001480384,
      "grad_norm": 0.38649057268135256,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.0153,
      "step": 20
    },
    {
      "epoch": 0.037009622501850484,
      "grad_norm": 0.32955202530777505,
      "learning_rate": 7.352941176470589e-06,
      "loss": 0.9792,
      "step": 25
    },
    {
      "epoch": 0.04441154700222058,
      "grad_norm": 0.25684601653269357,
      "learning_rate": 8.823529411764707e-06,
      "loss": 0.9341,
      "step": 30
    },
    {
      "epoch": 0.05181347150259067,
      "grad_norm": 0.18647088366865341,
      "learning_rate": 1.0294117647058823e-05,
      "loss": 0.8972,
      "step": 35
    },
    {
      "epoch": 0.05921539600296077,
      "grad_norm": 0.18552772548396557,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 0.8749,
      "step": 40
    },
    {
      "epoch": 0.06661732050333087,
      "grad_norm": 0.17707650341467074,
      "learning_rate": 1.323529411764706e-05,
      "loss": 0.8592,
      "step": 45
    },
    {
      "epoch": 0.07401924500370097,
      "grad_norm": 0.1888814710520619,
      "learning_rate": 1.4705882352941179e-05,
      "loss": 0.8706,
      "step": 50
    },
    {
      "epoch": 0.08142116950407106,
      "grad_norm": 0.16771182697525636,
      "learning_rate": 1.6176470588235296e-05,
      "loss": 0.8633,
      "step": 55
    },
    {
      "epoch": 0.08882309400444116,
      "grad_norm": 0.17693257965328252,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.8375,
      "step": 60
    },
    {
      "epoch": 0.09622501850481126,
      "grad_norm": 0.17368427495644334,
      "learning_rate": 1.911764705882353e-05,
      "loss": 0.833,
      "step": 65
    },
    {
      "epoch": 0.10362694300518134,
      "grad_norm": 0.20747768951612397,
      "learning_rate": 1.9999464266898485e-05,
      "loss": 0.8274,
      "step": 70
    },
    {
      "epoch": 0.11102886750555144,
      "grad_norm": 0.19776111155184978,
      "learning_rate": 1.9993437928712977e-05,
      "loss": 0.8218,
      "step": 75
    },
    {
      "epoch": 0.11843079200592153,
      "grad_norm": 0.18864224844745114,
      "learning_rate": 1.998071963486563e-05,
      "loss": 0.8109,
      "step": 80
    },
    {
      "epoch": 0.12583271650629163,
      "grad_norm": 0.17484784010530582,
      "learning_rate": 1.9961317901970953e-05,
      "loss": 0.7995,
      "step": 85
    },
    {
      "epoch": 0.13323464100666174,
      "grad_norm": 0.18875618975921596,
      "learning_rate": 1.993524572210807e-05,
      "loss": 0.7991,
      "step": 90
    },
    {
      "epoch": 0.14063656550703182,
      "grad_norm": 0.20019313739033587,
      "learning_rate": 1.990252055412077e-05,
      "loss": 0.7947,
      "step": 95
    },
    {
      "epoch": 0.14803849000740193,
      "grad_norm": 0.1977811468739612,
      "learning_rate": 1.9863164311926433e-05,
      "loss": 0.8215,
      "step": 100
    },
    {
      "epoch": 0.14803849000740193,
      "eval_loss": 0.8237107992172241,
      "eval_runtime": 7.4399,
      "eval_samples_per_second": 17.204,
      "eval_steps_per_second": 2.151,
      "step": 100
    },
    {
      "epoch": 0.15544041450777202,
      "grad_norm": 0.1703033428273923,
      "learning_rate": 1.981720334984174e-05,
      "loss": 0.7958,
      "step": 105
    },
    {
      "epoch": 0.16284233900814213,
      "grad_norm": 0.17213094767585055,
      "learning_rate": 1.9764668444934853e-05,
      "loss": 0.7895,
      "step": 110
    },
    {
      "epoch": 0.1702442635085122,
      "grad_norm": 0.19306907524923636,
      "learning_rate": 1.970559477641606e-05,
      "loss": 0.766,
      "step": 115
    },
    {
      "epoch": 0.17764618800888232,
      "grad_norm": 0.1823172169804357,
      "learning_rate": 1.9640021902080523e-05,
      "loss": 0.7963,
      "step": 120
    },
    {
      "epoch": 0.1850481125092524,
      "grad_norm": 0.15225193764112274,
      "learning_rate": 1.9567993731818988e-05,
      "loss": 0.7954,
      "step": 125
    },
    {
      "epoch": 0.19245003700962252,
      "grad_norm": 0.17702912049055494,
      "learning_rate": 1.9489558498214197e-05,
      "loss": 0.7875,
      "step": 130
    },
    {
      "epoch": 0.1998519615099926,
      "grad_norm": 0.16536442759668538,
      "learning_rate": 1.9404768724242667e-05,
      "loss": 0.7732,
      "step": 135
    },
    {
      "epoch": 0.20725388601036268,
      "grad_norm": 0.17695469917627027,
      "learning_rate": 1.931368118810346e-05,
      "loss": 0.798,
      "step": 140
    },
    {
      "epoch": 0.2146558105107328,
      "grad_norm": 0.18668537652959327,
      "learning_rate": 1.92163568851975e-05,
      "loss": 0.7788,
      "step": 145
    },
    {
      "epoch": 0.22205773501110287,
      "grad_norm": 0.19092408401343772,
      "learning_rate": 1.911286098728296e-05,
      "loss": 0.7753,
      "step": 150
    },
    {
      "epoch": 0.22945965951147299,
      "grad_norm": 0.18559715805668162,
      "learning_rate": 1.900326279883392e-05,
      "loss": 0.8045,
      "step": 155
    },
    {
      "epoch": 0.23686158401184307,
      "grad_norm": 0.18303788735937754,
      "learning_rate": 1.8887635710631716e-05,
      "loss": 0.8076,
      "step": 160
    },
    {
      "epoch": 0.24426350851221318,
      "grad_norm": 0.1727421146270662,
      "learning_rate": 1.8766057150619865e-05,
      "loss": 0.7806,
      "step": 165
    },
    {
      "epoch": 0.25166543301258326,
      "grad_norm": 0.16831652909455164,
      "learning_rate": 1.8638608532055635e-05,
      "loss": 0.7975,
      "step": 170
    },
    {
      "epoch": 0.25906735751295334,
      "grad_norm": 0.16233969726713526,
      "learning_rate": 1.8505375198992856e-05,
      "loss": 0.7862,
      "step": 175
    },
    {
      "epoch": 0.2664692820133235,
      "grad_norm": 0.17738766153741692,
      "learning_rate": 1.836644636913258e-05,
      "loss": 0.7573,
      "step": 180
    },
    {
      "epoch": 0.27387120651369357,
      "grad_norm": 0.18574919808380788,
      "learning_rate": 1.8221915074079764e-05,
      "loss": 0.7807,
      "step": 185
    },
    {
      "epoch": 0.28127313101406365,
      "grad_norm": 0.19177650849967526,
      "learning_rate": 1.8071878097046064e-05,
      "loss": 0.7589,
      "step": 190
    },
    {
      "epoch": 0.28867505551443373,
      "grad_norm": 0.16334964744370573,
      "learning_rate": 1.7916435908040413e-05,
      "loss": 0.7753,
      "step": 195
    },
    {
      "epoch": 0.29607698001480387,
      "grad_norm": 0.16244944722916593,
      "learning_rate": 1.7755692596590778e-05,
      "loss": 0.7775,
      "step": 200
    },
    {
      "epoch": 0.29607698001480387,
      "eval_loss": 0.792992353439331,
      "eval_runtime": 7.4106,
      "eval_samples_per_second": 17.273,
      "eval_steps_per_second": 2.159,
      "step": 200
    },
    {
      "epoch": 0.30347890451517395,
      "grad_norm": 0.17715403064277388,
      "learning_rate": 1.7589755802042188e-05,
      "loss": 0.776,
      "step": 205
    },
    {
      "epoch": 0.31088082901554404,
      "grad_norm": 0.17508986500166754,
      "learning_rate": 1.7418736641477636e-05,
      "loss": 0.7591,
      "step": 210
    },
    {
      "epoch": 0.3182827535159141,
      "grad_norm": 0.15999470402359026,
      "learning_rate": 1.7242749635310222e-05,
      "loss": 0.7607,
      "step": 215
    },
    {
      "epoch": 0.32568467801628426,
      "grad_norm": 0.1626819033920982,
      "learning_rate": 1.7061912630596252e-05,
      "loss": 0.7632,
      "step": 220
    },
    {
      "epoch": 0.33308660251665434,
      "grad_norm": 0.18407704372355851,
      "learning_rate": 1.6876346722120747e-05,
      "loss": 0.778,
      "step": 225
    },
    {
      "epoch": 0.3404885270170244,
      "grad_norm": 0.17328606506049593,
      "learning_rate": 1.6686176171308125e-05,
      "loss": 0.8005,
      "step": 230
    },
    {
      "epoch": 0.3478904515173945,
      "grad_norm": 0.16916776680028325,
      "learning_rate": 1.6491528323012412e-05,
      "loss": 0.7623,
      "step": 235
    },
    {
      "epoch": 0.35529237601776464,
      "grad_norm": 0.15837424098002126,
      "learning_rate": 1.6292533520242663e-05,
      "loss": 0.7646,
      "step": 240
    },
    {
      "epoch": 0.3626943005181347,
      "grad_norm": 0.1648529296157822,
      "learning_rate": 1.6089325016880737e-05,
      "loss": 0.7553,
      "step": 245
    },
    {
      "epoch": 0.3700962250185048,
      "grad_norm": 0.1722042996400624,
      "learning_rate": 1.588203888844982e-05,
      "loss": 0.7707,
      "step": 250
    },
    {
      "epoch": 0.3774981495188749,
      "grad_norm": 0.16050551428905252,
      "learning_rate": 1.5670813940993504e-05,
      "loss": 0.7435,
      "step": 255
    },
    {
      "epoch": 0.38490007401924503,
      "grad_norm": 0.1468845556389716,
      "learning_rate": 1.5455791618126407e-05,
      "loss": 0.7357,
      "step": 260
    },
    {
      "epoch": 0.3923019985196151,
      "grad_norm": 0.15657890189823614,
      "learning_rate": 1.5237115906318565e-05,
      "loss": 0.7596,
      "step": 265
    },
    {
      "epoch": 0.3997039230199852,
      "grad_norm": 0.1722407778545654,
      "learning_rate": 1.5014933238477069e-05,
      "loss": 0.7405,
      "step": 270
    },
    {
      "epoch": 0.4071058475203553,
      "grad_norm": 0.1715058049538323,
      "learning_rate": 1.4789392395889468e-05,
      "loss": 0.766,
      "step": 275
    },
    {
      "epoch": 0.41450777202072536,
      "grad_norm": 0.1611503458192985,
      "learning_rate": 1.4560644408594602e-05,
      "loss": 0.7464,
      "step": 280
    },
    {
      "epoch": 0.4219096965210955,
      "grad_norm": 0.1661548200950646,
      "learning_rate": 1.432884245424761e-05,
      "loss": 0.7584,
      "step": 285
    },
    {
      "epoch": 0.4293116210214656,
      "grad_norm": 0.16927831949079214,
      "learning_rate": 1.4094141755546816e-05,
      "loss": 0.7858,
      "step": 290
    },
    {
      "epoch": 0.43671354552183567,
      "grad_norm": 0.1574850762747471,
      "learning_rate": 1.3856699476291176e-05,
      "loss": 0.7455,
      "step": 295
    },
    {
      "epoch": 0.44411547002220575,
      "grad_norm": 0.1744882309738823,
      "learning_rate": 1.3616674616137902e-05,
      "loss": 0.7674,
      "step": 300
    },
    {
      "epoch": 0.44411547002220575,
      "eval_loss": 0.7782045602798462,
      "eval_runtime": 7.4217,
      "eval_samples_per_second": 17.247,
      "eval_steps_per_second": 2.156,
      "step": 300
    },
    {
      "epoch": 0.4515173945225759,
      "grad_norm": 0.1618448599435551,
      "learning_rate": 1.3374227904130724e-05,
      "loss": 0.7574,
      "step": 305
    },
    {
      "epoch": 0.45891931902294597,
      "grad_norm": 0.16272430492033355,
      "learning_rate": 1.3129521691070108e-05,
      "loss": 0.7354,
      "step": 310
    },
    {
      "epoch": 0.46632124352331605,
      "grad_norm": 0.17805107895523076,
      "learning_rate": 1.2882719840797473e-05,
      "loss": 0.7539,
      "step": 315
    },
    {
      "epoch": 0.47372316802368614,
      "grad_norm": 0.17630754983453525,
      "learning_rate": 1.2633987620466229e-05,
      "loss": 0.7379,
      "step": 320
    },
    {
      "epoch": 0.4811250925240563,
      "grad_norm": 0.1617923521402878,
      "learning_rate": 1.2383491589873122e-05,
      "loss": 0.743,
      "step": 325
    },
    {
      "epoch": 0.48852701702442636,
      "grad_norm": 0.15297179161199892,
      "learning_rate": 1.213139948992394e-05,
      "loss": 0.7525,
      "step": 330
    },
    {
      "epoch": 0.49592894152479644,
      "grad_norm": 0.19383034727623744,
      "learning_rate": 1.187788013030837e-05,
      "loss": 0.7498,
      "step": 335
    },
    {
      "epoch": 0.5033308660251665,
      "grad_norm": 0.15637509144236988,
      "learning_rate": 1.1623103276459086e-05,
      "loss": 0.7535,
      "step": 340
    },
    {
      "epoch": 0.5107327905255367,
      "grad_norm": 0.16045862427323498,
      "learning_rate": 1.1367239535870913e-05,
      "loss": 0.7453,
      "step": 345
    },
    {
      "epoch": 0.5181347150259067,
      "grad_norm": 0.17698858161148076,
      "learning_rate": 1.1110460243856051e-05,
      "loss": 0.7329,
      "step": 350
    },
    {
      "epoch": 0.5255366395262768,
      "grad_norm": 0.17040850730538934,
      "learning_rate": 1.085293734881197e-05,
      "loss": 0.7496,
      "step": 355
    },
    {
      "epoch": 0.532938564026647,
      "grad_norm": 0.1516391951391077,
      "learning_rate": 1.0594843297078736e-05,
      "loss": 0.7689,
      "step": 360
    },
    {
      "epoch": 0.540340488527017,
      "grad_norm": 0.17717536543082385,
      "learning_rate": 1.0336350917462925e-05,
      "loss": 0.7586,
      "step": 365
    },
    {
      "epoch": 0.5477424130273871,
      "grad_norm": 0.15016943441369615,
      "learning_rate": 1.0077633305505402e-05,
      "loss": 0.7462,
      "step": 370
    },
    {
      "epoch": 0.5551443375277573,
      "grad_norm": 0.15913796873415315,
      "learning_rate": 9.818863707570476e-06,
      "loss": 0.7635,
      "step": 375
    },
    {
      "epoch": 0.5625462620281273,
      "grad_norm": 0.16666990971754578,
      "learning_rate": 9.560215404834094e-06,
      "loss": 0.7541,
      "step": 380
    },
    {
      "epoch": 0.5699481865284974,
      "grad_norm": 0.16161835204134142,
      "learning_rate": 9.30186159724869e-06,
      "loss": 0.7171,
      "step": 385
    },
    {
      "epoch": 0.5773501110288675,
      "grad_norm": 0.15424373108412043,
      "learning_rate": 9.043975287562443e-06,
      "loss": 0.7496,
      "step": 390
    },
    {
      "epoch": 0.5847520355292376,
      "grad_norm": 0.17121844554933624,
      "learning_rate": 8.786729165470584e-06,
      "loss": 0.7279,
      "step": 395
    },
    {
      "epoch": 0.5921539600296077,
      "grad_norm": 0.17445724777238197,
      "learning_rate": 8.530295491976338e-06,
      "loss": 0.7334,
      "step": 400
    },
    {
      "epoch": 0.5921539600296077,
      "eval_loss": 0.7663605213165283,
      "eval_runtime": 7.4181,
      "eval_samples_per_second": 17.255,
      "eval_steps_per_second": 2.157,
      "step": 400
    },
    {
      "epoch": 0.5995558845299778,
      "grad_norm": 0.15017702386342432,
      "learning_rate": 8.274845984038916e-06,
      "loss": 0.7197,
      "step": 405
    },
    {
      "epoch": 0.6069578090303479,
      "grad_norm": 0.16598073279170417,
      "learning_rate": 8.020551699585843e-06,
      "loss": 0.7495,
      "step": 410
    },
    {
      "epoch": 0.6143597335307179,
      "grad_norm": 0.15212926433085763,
      "learning_rate": 7.76758292296659e-06,
      "loss": 0.7289,
      "step": 415
    },
    {
      "epoch": 0.6217616580310881,
      "grad_norm": 0.16430026960784816,
      "learning_rate": 7.5161090509242005e-06,
      "loss": 0.7447,
      "step": 420
    },
    {
      "epoch": 0.6291635825314582,
      "grad_norm": 0.16670189821433365,
      "learning_rate": 7.2662984791613186e-06,
      "loss": 0.737,
      "step": 425
    },
    {
      "epoch": 0.6365655070318282,
      "grad_norm": 0.166174564668407,
      "learning_rate": 7.01831848957653e-06,
      "loss": 0.7518,
      "step": 430
    },
    {
      "epoch": 0.6439674315321984,
      "grad_norm": 0.15187524190909013,
      "learning_rate": 6.772335138246548e-06,
      "loss": 0.7495,
      "step": 435
    },
    {
      "epoch": 0.6513693560325685,
      "grad_norm": 0.15771310643006878,
      "learning_rate": 6.528513144229256e-06,
      "loss": 0.7454,
      "step": 440
    },
    {
      "epoch": 0.6587712805329385,
      "grad_norm": 0.1545722716082389,
      "learning_rate": 6.287015779262064e-06,
      "loss": 0.7516,
      "step": 445
    },
    {
      "epoch": 0.6661732050333087,
      "grad_norm": 0.16486187790859883,
      "learning_rate": 6.048004758429451e-06,
      "loss": 0.7301,
      "step": 450
    },
    {
      "epoch": 0.6735751295336787,
      "grad_norm": 0.15313953292819255,
      "learning_rate": 5.811640131872867e-06,
      "loss": 0.7525,
      "step": 455
    },
    {
      "epoch": 0.6809770540340488,
      "grad_norm": 0.15174175480942678,
      "learning_rate": 5.578080177615575e-06,
      "loss": 0.723,
      "step": 460
    },
    {
      "epoch": 0.688378978534419,
      "grad_norm": 0.15793530605455464,
      "learning_rate": 5.347481295574141e-06,
      "loss": 0.7199,
      "step": 465
    },
    {
      "epoch": 0.695780903034789,
      "grad_norm": 0.15600126164264752,
      "learning_rate": 5.119997902827584e-06,
      "loss": 0.7315,
      "step": 470
    },
    {
      "epoch": 0.7031828275351591,
      "grad_norm": 0.1537946952676734,
      "learning_rate": 4.8957823302142916e-06,
      "loss": 0.7381,
      "step": 475
    },
    {
      "epoch": 0.7105847520355293,
      "grad_norm": 0.14471078551971742,
      "learning_rate": 4.674984720325961e-06,
      "loss": 0.7235,
      "step": 480
    },
    {
      "epoch": 0.7179866765358993,
      "grad_norm": 0.1485927432710996,
      "learning_rate": 4.457752926966888e-06,
      "loss": 0.7177,
      "step": 485
    },
    {
      "epoch": 0.7253886010362695,
      "grad_norm": 0.14777285394061146,
      "learning_rate": 4.244232416145839e-06,
      "loss": 0.7364,
      "step": 490
    },
    {
      "epoch": 0.7327905255366395,
      "grad_norm": 0.14915923688072272,
      "learning_rate": 4.0345661686669745e-06,
      "loss": 0.7299,
      "step": 495
    },
    {
      "epoch": 0.7401924500370096,
      "grad_norm": 0.1575162299441041,
      "learning_rate": 3.828894584384867e-06,
      "loss": 0.7383,
      "step": 500
    },
    {
      "epoch": 0.7401924500370096,
      "eval_loss": 0.7590785026550293,
      "eval_runtime": 7.4207,
      "eval_samples_per_second": 17.249,
      "eval_steps_per_second": 2.156,
      "step": 500
    },
    {
      "epoch": 0.7475943745373798,
      "grad_norm": 0.14090130126556033,
      "learning_rate": 3.62735538818787e-06,
      "loss": 0.7225,
      "step": 505
    },
    {
      "epoch": 0.7549962990377498,
      "grad_norm": 0.16015231295751484,
      "learning_rate": 3.4300835377726904e-06,
      "loss": 0.7258,
      "step": 510
    },
    {
      "epoch": 0.7623982235381199,
      "grad_norm": 0.15634890673943705,
      "learning_rate": 3.2372111332720045e-06,
      "loss": 0.7615,
      "step": 515
    },
    {
      "epoch": 0.7698001480384901,
      "grad_norm": 0.15492129407975255,
      "learning_rate": 3.048867328795588e-06,
      "loss": 0.7185,
      "step": 520
    },
    {
      "epoch": 0.7772020725388601,
      "grad_norm": 0.1536820552855932,
      "learning_rate": 2.865178245944218e-06,
      "loss": 0.7172,
      "step": 525
    },
    {
      "epoch": 0.7846039970392302,
      "grad_norm": 0.14404942403792287,
      "learning_rate": 2.686266889354211e-06,
      "loss": 0.7402,
      "step": 530
    },
    {
      "epoch": 0.7920059215396003,
      "grad_norm": 0.15061771515240063,
      "learning_rate": 2.5122530643292274e-06,
      "loss": 0.7457,
      "step": 535
    },
    {
      "epoch": 0.7994078460399704,
      "grad_norm": 0.15931765312029125,
      "learning_rate": 2.3432532966144526e-06,
      "loss": 0.735,
      "step": 540
    },
    {
      "epoch": 0.8068097705403405,
      "grad_norm": 0.14880606411370811,
      "learning_rate": 2.1793807543668857e-06,
      "loss": 0.7369,
      "step": 545
    },
    {
      "epoch": 0.8142116950407106,
      "grad_norm": 0.13955804663380328,
      "learning_rate": 2.0207451723739633e-06,
      "loss": 0.7286,
      "step": 550
    },
    {
      "epoch": 0.8216136195410807,
      "grad_norm": 0.14222438381196842,
      "learning_rate": 1.8674527785713247e-06,
      "loss": 0.7352,
      "step": 555
    },
    {
      "epoch": 0.8290155440414507,
      "grad_norm": 0.15493563458184517,
      "learning_rate": 1.7196062229088606e-06,
      "loss": 0.7024,
      "step": 560
    },
    {
      "epoch": 0.8364174685418209,
      "grad_norm": 0.15849673501104045,
      "learning_rate": 1.577304508612717e-06,
      "loss": 0.7327,
      "step": 565
    },
    {
      "epoch": 0.843819393042191,
      "grad_norm": 0.1319970212658075,
      "learning_rate": 1.4406429258892762e-06,
      "loss": 0.7533,
      "step": 570
    },
    {
      "epoch": 0.851221317542561,
      "grad_norm": 0.1537421430544868,
      "learning_rate": 1.3097129881154936e-06,
      "loss": 0.7227,
      "step": 575
    },
    {
      "epoch": 0.8586232420429312,
      "grad_norm": 0.14761812731310603,
      "learning_rate": 1.1846023705583442e-06,
      "loss": 0.7192,
      "step": 580
    },
    {
      "epoch": 0.8660251665433013,
      "grad_norm": 0.1467426757613686,
      "learning_rate": 1.065394851664394e-06,
      "loss": 0.7372,
      "step": 585
    },
    {
      "epoch": 0.8734270910436713,
      "grad_norm": 0.13796190944318665,
      "learning_rate": 9.521702569588199e-07,
      "loss": 0.7567,
      "step": 590
    },
    {
      "epoch": 0.8808290155440415,
      "grad_norm": 0.1465576861864633,
      "learning_rate": 8.450044055914497e-07,
      "loss": 0.7251,
      "step": 595
    },
    {
      "epoch": 0.8882309400444115,
      "grad_norm": 0.1438541063374612,
      "learning_rate": 7.439690595656013e-07,
      "loss": 0.7476,
      "step": 600
    },
    {
      "epoch": 0.8882309400444115,
      "eval_loss": 0.756076455116272,
      "eval_runtime": 7.4191,
      "eval_samples_per_second": 17.253,
      "eval_steps_per_second": 2.157,
      "step": 600
    },
    {
      "epoch": 0.8956328645447816,
      "grad_norm": 0.15935861793163747,
      "learning_rate": 6.491318756837417e-07,
      "loss": 0.7326,
      "step": 605
    },
    {
      "epoch": 0.9030347890451518,
      "grad_norm": 0.13053574456346068,
      "learning_rate": 5.605563602421149e-07,
      "loss": 0.7086,
      "step": 610
    },
    {
      "epoch": 0.9104367135455218,
      "grad_norm": 0.14030080200508646,
      "learning_rate": 4.783018265047179e-07,
      "loss": 0.7587,
      "step": 615
    },
    {
      "epoch": 0.9178386380458919,
      "grad_norm": 0.14930285941092133,
      "learning_rate": 4.024233549850509e-07,
      "loss": 0.7466,
      "step": 620
    },
    {
      "epoch": 0.9252405625462621,
      "grad_norm": 0.138865713951949,
      "learning_rate": 3.329717565622825e-07,
      "loss": 0.7435,
      "step": 625
    },
    {
      "epoch": 0.9326424870466321,
      "grad_norm": 0.13900920647213558,
      "learning_rate": 2.6999353845651113e-07,
      "loss": 0.7269,
      "step": 630
    },
    {
      "epoch": 0.9400444115470022,
      "grad_norm": 0.14964928985956502,
      "learning_rate": 2.1353087308590314e-07,
      "loss": 0.742,
      "step": 635
    },
    {
      "epoch": 0.9474463360473723,
      "grad_norm": 0.14169454043564503,
      "learning_rate": 1.6362156982656085e-07,
      "loss": 0.732,
      "step": 640
    },
    {
      "epoch": 0.9548482605477424,
      "grad_norm": 0.13204403743757542,
      "learning_rate": 1.2029904969404482e-07,
      "loss": 0.7158,
      "step": 645
    },
    {
      "epoch": 0.9622501850481125,
      "grad_norm": 0.15194221565424584,
      "learning_rate": 8.359232296349163e-08,
      "loss": 0.7192,
      "step": 650
    },
    {
      "epoch": 0.9696521095484826,
      "grad_norm": 0.1435144775044456,
      "learning_rate": 5.3525969743324356e-08,
      "loss": 0.7349,
      "step": 655
    },
    {
      "epoch": 0.9770540340488527,
      "grad_norm": 0.13844204120911296,
      "learning_rate": 3.012012351554017e-08,
      "loss": 0.7093,
      "step": 660
    },
    {
      "epoch": 0.9844559585492227,
      "grad_norm": 0.15365673275391972,
      "learning_rate": 1.3390457653639221e-08,
      "loss": 0.7378,
      "step": 665
    },
    {
      "epoch": 0.9918578830495929,
      "grad_norm": 0.16372514723707865,
      "learning_rate": 3.3481749271768726e-09,
      "loss": 0.7492,
      "step": 670
    },
    {
      "epoch": 0.999259807549963,
      "grad_norm": 0.14453971027117873,
      "learning_rate": 0.0,
      "loss": 0.7185,
      "step": 675
    },
    {
      "epoch": 0.999259807549963,
      "step": 675,
      "total_flos": 5.2170668949005926e+17,
      "train_loss": 0.7709741959748445,
      "train_runtime": 4715.7656,
      "train_samples_per_second": 4.583,
      "train_steps_per_second": 0.143
    }
  ],
  "logging_steps": 5,
  "max_steps": 675,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.2170668949005926e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
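The log above matches the layout of a `trainer_state.json` written by the Hugging Face `transformers` Trainer: per-step records in `log_history` carry a `loss`, evaluation records carry an `eval_loss`, and a final summary record carries `train_loss`. Below is a minimal sketch of how one might pull the loss curve and the best evaluation point out of such a file; the filename `trainer_state.json` and the key names are assumptions taken from the log itself, not from any accompanying code.

```python
import json

# Assumption: the state shown above is saved as "trainer_state.json";
# adjust the path to wherever the Trainer wrote its checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training records have a "loss" key; evaluation records have
# "eval_loss"; the final summary record has neither, so it is skipped.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"training records: {len(train_log)}, evaluations: {len(eval_log)}")
print(f"final train loss: {train_log[-1]['loss']:.4f} at step {train_log[-1]['step']}")

# Lowest eval loss across the eval_steps=100 evaluation points.
best = min(eval_log, key=lambda e: e["eval_loss"])
print(f"lowest eval loss: {best['eval_loss']:.4f} at step {best['step']}")
```

On this log the sketch would report the final training loss of 0.7185 at step 675 and the lowest eval loss of 0.7561 at step 600.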