{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 912,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.3891168855130672,
      "epoch": 0.043859649122807015,
      "grad_norm": 18.536834716796875,
      "learning_rate": 2e-05,
      "loss": 4.281,
      "mean_token_accuracy": 0.5703265547752381,
      "num_tokens": 29513.0,
      "step": 10
    },
    {
      "entropy": 0.749189381301403,
      "epoch": 0.08771929824561403,
      "grad_norm": 3.8398051261901855,
      "learning_rate": 2e-05,
      "loss": 1.9625,
      "mean_token_accuracy": 0.6897866070270539,
      "num_tokens": 58932.0,
      "step": 20
    },
    {
      "entropy": 1.169826951622963,
      "epoch": 0.13157894736842105,
      "grad_norm": 2.5061917304992676,
      "learning_rate": 2e-05,
      "loss": 1.4255,
      "mean_token_accuracy": 0.7357227891683579,
      "num_tokens": 88352.0,
      "step": 30
    },
    {
      "entropy": 1.1334790736436844,
      "epoch": 0.17543859649122806,
      "grad_norm": 1.9473565816879272,
      "learning_rate": 2e-05,
      "loss": 1.1315,
      "mean_token_accuracy": 0.7862708762288093,
      "num_tokens": 117877.0,
      "step": 40
    },
    {
      "entropy": 0.8736076682806015,
      "epoch": 0.21929824561403508,
      "grad_norm": 1.7826578617095947,
      "learning_rate": 2e-05,
      "loss": 0.8549,
      "mean_token_accuracy": 0.8343304082751274,
      "num_tokens": 147392.0,
      "step": 50
    },
    {
      "entropy": 0.5955807730555535,
      "epoch": 0.2631578947368421,
      "grad_norm": 1.6632131338119507,
      "learning_rate": 2e-05,
      "loss": 0.58,
      "mean_token_accuracy": 0.8876266479492188,
      "num_tokens": 176728.0,
      "step": 60
    },
    {
      "entropy": 0.34382262006402015,
      "epoch": 0.30701754385964913,
      "grad_norm": 1.4843252897262573,
      "learning_rate": 2e-05,
      "loss": 0.3445,
      "mean_token_accuracy": 0.9318736225366593,
      "num_tokens": 206184.0,
      "step": 70
    },
    {
      "entropy": 0.19373956136405468,
      "epoch": 0.3508771929824561,
      "grad_norm": 0.904003918170929,
      "learning_rate": 2e-05,
      "loss": 0.189,
      "mean_token_accuracy": 0.9710342198610306,
      "num_tokens": 235688.0,
      "step": 80
    },
    {
      "entropy": 0.12052652426064014,
      "epoch": 0.39473684210526316,
      "grad_norm": 0.7406700849533081,
      "learning_rate": 2e-05,
      "loss": 0.1056,
      "mean_token_accuracy": 0.9886028110980988,
      "num_tokens": 265172.0,
      "step": 90
    },
    {
      "entropy": 0.0992697212845087,
      "epoch": 0.43859649122807015,
      "grad_norm": 0.606019139289856,
      "learning_rate": 2e-05,
      "loss": 0.0817,
      "mean_token_accuracy": 0.989163076877594,
      "num_tokens": 294687.0,
      "step": 100
    },
    {
      "entropy": 0.08367474023252726,
      "epoch": 0.4824561403508772,
      "grad_norm": 0.4705955386161804,
      "learning_rate": 2e-05,
      "loss": 0.0737,
      "mean_token_accuracy": 0.9905414953827858,
      "num_tokens": 324088.0,
      "step": 110
    },
    {
      "entropy": 0.07386345528066159,
      "epoch": 0.5263157894736842,
      "grad_norm": 0.4300910234451294,
      "learning_rate": 2e-05,
      "loss": 0.0631,
      "mean_token_accuracy": 0.9910139158368111,
      "num_tokens": 353582.0,
      "step": 120
    },
    {
      "entropy": 0.06888655042275786,
      "epoch": 0.5701754385964912,
      "grad_norm": 0.45082923769950867,
      "learning_rate": 2e-05,
      "loss": 0.0661,
      "mean_token_accuracy": 0.9910464301705361,
      "num_tokens": 383080.0,
      "step": 130
    },
    {
      "entropy": 0.06553246006369591,
      "epoch": 0.6140350877192983,
      "grad_norm": 0.4258216619491577,
      "learning_rate": 2e-05,
      "loss": 0.0615,
      "mean_token_accuracy": 0.9914803236722947,
      "num_tokens": 412559.0,
      "step": 140
    },
    {
      "entropy": 0.06352790119126439,
      "epoch": 0.6578947368421053,
      "grad_norm": 0.37360644340515137,
      "learning_rate": 2e-05,
      "loss": 0.0579,
      "mean_token_accuracy": 0.9916400715708733,
      "num_tokens": 442008.0,
      "step": 150
    },
    {
      "entropy": 0.060972847137600185,
      "epoch": 0.7017543859649122,
      "grad_norm": 0.3406882584095001,
      "learning_rate": 2e-05,
      "loss": 0.0576,
      "mean_token_accuracy": 0.9914845108985901,
      "num_tokens": 471495.0,
      "step": 160
    },
    {
      "entropy": 0.05891128294169903,
      "epoch": 0.7456140350877193,
      "grad_norm": 0.2854160964488983,
      "learning_rate": 2e-05,
      "loss": 0.057,
      "mean_token_accuracy": 0.9915445119142532,
      "num_tokens": 500954.0,
      "step": 170
    },
    {
      "entropy": 0.0579888011328876,
      "epoch": 0.7894736842105263,
      "grad_norm": 0.40168505907058716,
      "learning_rate": 2e-05,
      "loss": 0.0507,
      "mean_token_accuracy": 0.9923512250185013,
      "num_tokens": 530401.0,
      "step": 180
    },
    {
      "entropy": 0.0573225624859333,
      "epoch": 0.8333333333333334,
      "grad_norm": 0.4589792490005493,
      "learning_rate": 2e-05,
      "loss": 0.0561,
      "mean_token_accuracy": 0.9916056364774704,
      "num_tokens": 559940.0,
      "step": 190
    },
    {
      "entropy": 0.05970530286431312,
      "epoch": 0.8771929824561403,
      "grad_norm": 0.5275149941444397,
      "learning_rate": 2e-05,
      "loss": 0.0573,
      "mean_token_accuracy": 0.9915869951248169,
      "num_tokens": 589427.0,
      "step": 200
    },
    {
      "entropy": 0.059507152158766986,
      "epoch": 0.9210526315789473,
      "grad_norm": 0.2661688029766083,
      "learning_rate": 2e-05,
      "loss": 0.0515,
      "mean_token_accuracy": 0.9923170626163482,
      "num_tokens": 618838.0,
      "step": 210
    },
    {
      "entropy": 0.05687206219881773,
      "epoch": 0.9649122807017544,
      "grad_norm": 0.35179731249809265,
      "learning_rate": 2e-05,
      "loss": 0.0506,
      "mean_token_accuracy": 0.9918064430356026,
      "num_tokens": 648258.0,
      "step": 220
    },
    {
      "entropy": 0.058181710354983804,
      "epoch": 1.0087719298245614,
      "grad_norm": 0.3514145314693451,
      "learning_rate": 2e-05,
      "loss": 0.0522,
      "mean_token_accuracy": 0.9915239199995994,
      "num_tokens": 677779.0,
      "step": 230
    },
    {
      "entropy": 0.055358363036066296,
      "epoch": 1.0526315789473684,
      "grad_norm": 0.3630446493625641,
      "learning_rate": 2e-05,
      "loss": 0.0482,
      "mean_token_accuracy": 0.9922210231423378,
      "num_tokens": 707217.0,
      "step": 240
    },
    {
      "entropy": 0.0538893367163837,
      "epoch": 1.0964912280701755,
      "grad_norm": 0.3740084767341614,
      "learning_rate": 2e-05,
      "loss": 0.0448,
      "mean_token_accuracy": 0.9927210569381714,
      "num_tokens": 736641.0,
      "step": 250
    },
    {
      "entropy": 0.0544189871288836,
      "epoch": 1.1403508771929824,
      "grad_norm": 0.31057634949684143,
      "learning_rate": 2e-05,
      "loss": 0.0474,
      "mean_token_accuracy": 0.9922840282320976,
      "num_tokens": 766189.0,
      "step": 260
    },
    {
      "entropy": 0.053078283276408914,
      "epoch": 1.1842105263157894,
      "grad_norm": 0.21449895203113556,
      "learning_rate": 2e-05,
      "loss": 0.0468,
      "mean_token_accuracy": 0.9923206493258476,
      "num_tokens": 795639.0,
      "step": 270
    },
    {
      "entropy": 0.05415161233395338,
      "epoch": 1.2280701754385965,
      "grad_norm": 0.30692726373672485,
      "learning_rate": 2e-05,
      "loss": 0.0458,
      "mean_token_accuracy": 0.9922255620360374,
      "num_tokens": 825119.0,
      "step": 280
    },
    {
      "entropy": 0.05160839455202222,
      "epoch": 1.2719298245614035,
      "grad_norm": 0.3166206479072571,
      "learning_rate": 2e-05,
      "loss": 0.0446,
      "mean_token_accuracy": 0.9921553313732148,
      "num_tokens": 854581.0,
      "step": 290
    },
    {
      "entropy": 0.05075615206733346,
      "epoch": 1.3157894736842106,
      "grad_norm": 0.3627457618713379,
      "learning_rate": 2e-05,
      "loss": 0.0417,
      "mean_token_accuracy": 0.9924884453415871,
      "num_tokens": 884020.0,
      "step": 300
    },
    {
      "entropy": 0.0517747713252902,
      "epoch": 1.3596491228070176,
      "grad_norm": 0.37561649084091187,
      "learning_rate": 2e-05,
      "loss": 0.0435,
      "mean_token_accuracy": 0.992647610604763,
      "num_tokens": 913529.0,
      "step": 310
    },
    {
      "entropy": 0.05250948471948504,
      "epoch": 1.4035087719298245,
      "grad_norm": 0.42561790347099304,
      "learning_rate": 2e-05,
      "loss": 0.0431,
      "mean_token_accuracy": 0.9922929123044014,
      "num_tokens": 942986.0,
      "step": 320
    },
    {
      "entropy": 0.05172846736386418,
      "epoch": 1.4473684210526316,
      "grad_norm": 0.2577071785926819,
      "learning_rate": 2e-05,
      "loss": 0.041,
      "mean_token_accuracy": 0.9923748031258584,
      "num_tokens": 972495.0,
      "step": 330
    },
    {
      "entropy": 0.04749757144600153,
      "epoch": 1.4912280701754386,
      "grad_norm": 0.25072360038757324,
      "learning_rate": 2e-05,
      "loss": 0.0402,
      "mean_token_accuracy": 0.9924940422177315,
      "num_tokens": 1001947.0,
      "step": 340
    },
    {
      "entropy": 0.04762007407844067,
      "epoch": 1.5350877192982457,
      "grad_norm": 0.3604894280433655,
      "learning_rate": 2e-05,
      "loss": 0.0392,
      "mean_token_accuracy": 0.992351396381855,
      "num_tokens": 1031488.0,
      "step": 350
    },
    {
      "entropy": 0.047403684537857774,
      "epoch": 1.5789473684210527,
      "grad_norm": 0.2911185920238495,
      "learning_rate": 2e-05,
      "loss": 0.0381,
      "mean_token_accuracy": 0.9927886813879013,
      "num_tokens": 1060887.0,
      "step": 360
    },
    {
      "entropy": 0.04852928835898638,
      "epoch": 1.6228070175438596,
      "grad_norm": 0.2618354856967926,
      "learning_rate": 2e-05,
      "loss": 0.0385,
      "mean_token_accuracy": 0.9927341029047966,
      "num_tokens": 1090348.0,
      "step": 370
    },
    {
      "entropy": 0.04723916696384549,
      "epoch": 1.6666666666666665,
      "grad_norm": 0.26962020993232727,
      "learning_rate": 2e-05,
      "loss": 0.0383,
      "mean_token_accuracy": 0.9925345599651336,
      "num_tokens": 1119839.0,
      "step": 380
    },
    {
      "entropy": 0.046288730949163436,
      "epoch": 1.7105263157894737,
      "grad_norm": 0.4525667130947113,
      "learning_rate": 2e-05,
      "loss": 0.0352,
      "mean_token_accuracy": 0.9927756577730179,
      "num_tokens": 1149317.0,
      "step": 390
    },
    {
      "entropy": 0.04401880670338869,
      "epoch": 1.7543859649122808,
      "grad_norm": 0.23167090117931366,
      "learning_rate": 2e-05,
      "loss": 0.0354,
      "mean_token_accuracy": 0.9926382765173912,
      "num_tokens": 1178783.0,
      "step": 400
    },
    {
      "entropy": 0.04442735444754362,
      "epoch": 1.7982456140350878,
      "grad_norm": 0.2764274477958679,
      "learning_rate": 2e-05,
      "loss": 0.0375,
      "mean_token_accuracy": 0.9925318494439125,
      "num_tokens": 1208255.0,
      "step": 410
    },
    {
      "entropy": 0.046166476979851725,
      "epoch": 1.8421052631578947,
      "grad_norm": 0.3111913800239563,
      "learning_rate": 2e-05,
      "loss": 0.0385,
      "mean_token_accuracy": 0.992423489689827,
      "num_tokens": 1237708.0,
      "step": 420
    },
    {
      "entropy": 0.044908494455739856,
      "epoch": 1.8859649122807016,
      "grad_norm": 0.23237602412700653,
      "learning_rate": 2e-05,
      "loss": 0.0385,
      "mean_token_accuracy": 0.9935551881790161,
      "num_tokens": 1267186.0,
      "step": 430
    },
    {
      "entropy": 0.04562048772349954,
      "epoch": 1.9298245614035088,
      "grad_norm": 0.25153592228889465,
      "learning_rate": 2e-05,
      "loss": 0.033,
      "mean_token_accuracy": 0.9941498264670372,
      "num_tokens": 1296591.0,
      "step": 440
    },
    {
      "entropy": 0.044570739334449175,
      "epoch": 1.973684210526316,
      "grad_norm": 0.23802141845226288,
      "learning_rate": 2e-05,
      "loss": 0.033,
      "mean_token_accuracy": 0.9942585557699204,
      "num_tokens": 1326042.0,
      "step": 450
    },
    {
      "entropy": 0.044205385353416206,
      "epoch": 2.017543859649123,
      "grad_norm": 0.3467763066291809,
      "learning_rate": 2e-05,
      "loss": 0.0331,
      "mean_token_accuracy": 0.9940735891461372,
      "num_tokens": 1355561.0,
      "step": 460
    },
    {
      "entropy": 0.042760218819603325,
      "epoch": 2.06140350877193,
      "grad_norm": 0.2680515646934509,
      "learning_rate": 2e-05,
      "loss": 0.0319,
      "mean_token_accuracy": 0.9945705458521843,
      "num_tokens": 1385048.0,
      "step": 470
    },
    {
      "entropy": 0.04117725370451808,
      "epoch": 2.1052631578947367,
      "grad_norm": 0.39523598551750183,
      "learning_rate": 2e-05,
      "loss": 0.033,
      "mean_token_accuracy": 0.9936558306217194,
      "num_tokens": 1414543.0,
      "step": 480
    },
    {
      "entropy": 0.04356150296516716,
      "epoch": 2.1491228070175437,
      "grad_norm": 0.23617857694625854,
      "learning_rate": 2e-05,
      "loss": 0.0332,
      "mean_token_accuracy": 0.9941312283277511,
      "num_tokens": 1444017.0,
      "step": 490
    },
    {
      "entropy": 0.04058904880657792,
      "epoch": 2.192982456140351,
      "grad_norm": 0.3990024924278259,
      "learning_rate": 2e-05,
      "loss": 0.0285,
      "mean_token_accuracy": 0.9948335066437721,
      "num_tokens": 1473442.0,
      "step": 500
    },
    {
      "entropy": 0.04089017482474446,
      "epoch": 2.236842105263158,
      "grad_norm": 0.22417540848255157,
      "learning_rate": 2e-05,
      "loss": 0.0314,
      "mean_token_accuracy": 0.994497561454773,
      "num_tokens": 1502879.0,
      "step": 510
    },
    {
      "entropy": 0.04202216519042849,
      "epoch": 2.280701754385965,
      "grad_norm": 0.20546036958694458,
      "learning_rate": 2e-05,
      "loss": 0.0321,
      "mean_token_accuracy": 0.9938287988305092,
      "num_tokens": 1532392.0,
      "step": 520
    },
    {
      "entropy": 0.04143659081310034,
      "epoch": 2.324561403508772,
      "grad_norm": 0.26604264974594116,
      "learning_rate": 2e-05,
      "loss": 0.0313,
      "mean_token_accuracy": 0.9944339916110039,
      "num_tokens": 1561839.0,
      "step": 530
    },
    {
      "entropy": 0.03936287453398109,
      "epoch": 2.3684210526315788,
      "grad_norm": 0.23989664018154144,
      "learning_rate": 2e-05,
      "loss": 0.029,
      "mean_token_accuracy": 0.9943608403205871,
      "num_tokens": 1591271.0,
      "step": 540
    },
    {
      "entropy": 0.039195985486730936,
      "epoch": 2.412280701754386,
      "grad_norm": 0.3053726255893707,
      "learning_rate": 2e-05,
      "loss": 0.0306,
      "mean_token_accuracy": 0.9943523585796357,
      "num_tokens": 1620794.0,
      "step": 550
    },
    {
      "entropy": 0.039683265471830965,
      "epoch": 2.456140350877193,
      "grad_norm": 0.2588869631290436,
      "learning_rate": 2e-05,
      "loss": 0.0293,
      "mean_token_accuracy": 0.9944723203778267,
      "num_tokens": 1650276.0,
      "step": 560
    },
    {
      "entropy": 0.03602620945312083,
      "epoch": 2.5,
      "grad_norm": 0.2668582797050476,
      "learning_rate": 2e-05,
      "loss": 0.0292,
      "mean_token_accuracy": 0.9945638462901115,
      "num_tokens": 1679717.0,
      "step": 570
    },
    {
      "entropy": 0.03886530273593962,
      "epoch": 2.543859649122807,
      "grad_norm": 0.2594759464263916,
      "learning_rate": 2e-05,
      "loss": 0.0294,
      "mean_token_accuracy": 0.9944349855184555,
      "num_tokens": 1709202.0,
      "step": 580
    },
    {
      "entropy": 0.03563018930144608,
      "epoch": 2.587719298245614,
      "grad_norm": 0.22012071311473846,
      "learning_rate": 2e-05,
      "loss": 0.0272,
      "mean_token_accuracy": 0.9947109371423721,
      "num_tokens": 1738701.0,
      "step": 590
    },
    {
      "entropy": 0.03517519012093544,
      "epoch": 2.6315789473684212,
      "grad_norm": 0.2758197486400604,
      "learning_rate": 2e-05,
      "loss": 0.03,
      "mean_token_accuracy": 0.9942290917038917,
      "num_tokens": 1768151.0,
      "step": 600
    },
    {
      "entropy": 0.03706208867952228,
      "epoch": 2.675438596491228,
      "grad_norm": 0.3245140016078949,
      "learning_rate": 2e-05,
      "loss": 0.0288,
      "mean_token_accuracy": 0.9942933633923531,
      "num_tokens": 1797583.0,
      "step": 610
    },
    {
      "entropy": 0.034479991812258956,
      "epoch": 2.719298245614035,
      "grad_norm": 0.22120347619056702,
      "learning_rate": 2e-05,
      "loss": 0.0282,
      "mean_token_accuracy": 0.9947970882058144,
      "num_tokens": 1827006.0,
      "step": 620
    },
    {
      "entropy": 0.033304579788818955,
      "epoch": 2.763157894736842,
      "grad_norm": 0.26880016922950745,
      "learning_rate": 2e-05,
      "loss": 0.0277,
      "mean_token_accuracy": 0.994759914278984,
      "num_tokens": 1856428.0,
      "step": 630
    },
    {
      "entropy": 0.0331753586884588,
      "epoch": 2.807017543859649,
      "grad_norm": 0.24720199406147003,
      "learning_rate": 2e-05,
      "loss": 0.0278,
      "mean_token_accuracy": 0.9946068048477172,
      "num_tokens": 1885916.0,
      "step": 640
    },
    {
      "entropy": 0.03345069149509072,
      "epoch": 2.8508771929824563,
      "grad_norm": 0.27891653776168823,
      "learning_rate": 2e-05,
      "loss": 0.0275,
      "mean_token_accuracy": 0.9948126003146172,
      "num_tokens": 1915396.0,
      "step": 650
    },
    {
      "entropy": 0.0333542559761554,
      "epoch": 2.8947368421052633,
      "grad_norm": 0.4004022479057312,
      "learning_rate": 2e-05,
      "loss": 0.0289,
      "mean_token_accuracy": 0.9938531696796418,
      "num_tokens": 1945009.0,
      "step": 660
    },
    {
      "entropy": 0.02968177660368383,
      "epoch": 2.93859649122807,
      "grad_norm": 0.2902744710445404,
      "learning_rate": 2e-05,
      "loss": 0.0241,
      "mean_token_accuracy": 0.9945998504757881,
      "num_tokens": 1974462.0,
      "step": 670
    },
    {
      "entropy": 0.0323303550016135,
      "epoch": 2.982456140350877,
      "grad_norm": 0.2170533686876297,
      "learning_rate": 2e-05,
      "loss": 0.0287,
      "mean_token_accuracy": 0.9943123281002044,
      "num_tokens": 2003840.0,
      "step": 680
    },
    {
      "entropy": 0.0318671815097332,
      "epoch": 3.026315789473684,
      "grad_norm": 0.19683605432510376,
      "learning_rate": 2e-05,
      "loss": 0.027,
      "mean_token_accuracy": 0.9946763277053833,
      "num_tokens": 2033324.0,
      "step": 690
    },
    {
      "entropy": 0.03094636939931661,
      "epoch": 3.0701754385964914,
      "grad_norm": 0.3156299889087677,
      "learning_rate": 2e-05,
      "loss": 0.027,
      "mean_token_accuracy": 0.9943998187780381,
      "num_tokens": 2062796.0,
      "step": 700
    },
    {
      "entropy": 0.029998012352734804,
      "epoch": 3.1140350877192984,
      "grad_norm": 0.19611181318759918,
      "learning_rate": 2e-05,
      "loss": 0.0251,
      "mean_token_accuracy": 0.9945186242461205,
      "num_tokens": 2092349.0,
      "step": 710
    },
    {
      "entropy": 0.02844015813898295,
      "epoch": 3.1578947368421053,
      "grad_norm": 0.3108363151550293,
      "learning_rate": 2e-05,
      "loss": 0.026,
      "mean_token_accuracy": 0.9948573753237724,
      "num_tokens": 2121717.0,
      "step": 720
    },
    {
      "entropy": 0.02917947373352945,
      "epoch": 3.2017543859649122,
      "grad_norm": 0.3223120868206024,
      "learning_rate": 2e-05,
      "loss": 0.0259,
      "mean_token_accuracy": 0.9950737491250038,
      "num_tokens": 2151171.0,
      "step": 730
    },
    {
      "entropy": 0.02822321942076087,
      "epoch": 3.245614035087719,
      "grad_norm": 0.1794469654560089,
      "learning_rate": 2e-05,
      "loss": 0.0258,
      "mean_token_accuracy": 0.9945150166749954,
      "num_tokens": 2180705.0,
      "step": 740
    },
    {
      "entropy": 0.026581059489399195,
      "epoch": 3.2894736842105265,
      "grad_norm": 0.2445470094680786,
      "learning_rate": 2e-05,
      "loss": 0.026,
      "mean_token_accuracy": 0.9945855379104614,
      "num_tokens": 2210233.0,
      "step": 750
    },
    {
      "entropy": 0.02701793306041509,
      "epoch": 3.3333333333333335,
      "grad_norm": 0.28041425347328186,
      "learning_rate": 2e-05,
      "loss": 0.0237,
      "mean_token_accuracy": 0.9952751606702804,
      "num_tokens": 2239637.0,
      "step": 760
    },
    {
      "entropy": 0.03019589218311012,
      "epoch": 3.3771929824561404,
      "grad_norm": 0.25402745604515076,
      "learning_rate": 2e-05,
      "loss": 0.0283,
      "mean_token_accuracy": 0.9943036273121834,
      "num_tokens": 2269135.0,
      "step": 770
    },
    {
      "entropy": 0.030228979233652354,
      "epoch": 3.4210526315789473,
      "grad_norm": 0.22020572423934937,
      "learning_rate": 2e-05,
      "loss": 0.0269,
      "mean_token_accuracy": 0.9944290310144425,
      "num_tokens": 2298575.0,
      "step": 780
    },
    {
      "entropy": 0.03184722135774791,
      "epoch": 3.4649122807017543,
      "grad_norm": 0.2342633605003357,
      "learning_rate": 2e-05,
      "loss": 0.0263,
      "mean_token_accuracy": 0.9947415545582772,
      "num_tokens": 2328042.0,
      "step": 790
    },
    {
      "entropy": 0.02975663202814758,
      "epoch": 3.5087719298245617,
      "grad_norm": 0.46808409690856934,
      "learning_rate": 2e-05,
      "loss": 0.0277,
      "mean_token_accuracy": 0.9943336308002472,
      "num_tokens": 2357539.0,
      "step": 800
    },
    {
      "entropy": 0.02755520197097212,
      "epoch": 3.5526315789473686,
      "grad_norm": 0.2859908938407898,
      "learning_rate": 2e-05,
      "loss": 0.0269,
      "mean_token_accuracy": 0.9948696240782737,
      "num_tokens": 2386963.0,
      "step": 810
    },
    {
      "entropy": 0.0279413893353194,
      "epoch": 3.5964912280701755,
      "grad_norm": 0.2786264419555664,
      "learning_rate": 2e-05,
      "loss": 0.0275,
      "mean_token_accuracy": 0.9943128302693367,
      "num_tokens": 2416501.0,
      "step": 820
    },
    {
      "entropy": 0.02768249998334795,
      "epoch": 3.6403508771929824,
      "grad_norm": 0.22936373949050903,
      "learning_rate": 2e-05,
      "loss": 0.0239,
      "mean_token_accuracy": 0.9947321966290474,
      "num_tokens": 2445941.0,
      "step": 830
    },
    {
      "entropy": 0.02423749384470284,
      "epoch": 3.6842105263157894,
      "grad_norm": 0.2773398756980896,
      "learning_rate": 2e-05,
      "loss": 0.0225,
      "mean_token_accuracy": 0.9952654018998146,
      "num_tokens": 2475299.0,
      "step": 840
    },
    {
      "entropy": 0.02699003741145134,
      "epoch": 3.7280701754385968,
      "grad_norm": 0.23085203766822815,
      "learning_rate": 2e-05,
      "loss": 0.0257,
      "mean_token_accuracy": 0.9947722434997559,
      "num_tokens": 2504764.0,
      "step": 850
    },
    {
      "entropy": 0.029456905997358264,
      "epoch": 3.7719298245614032,
      "grad_norm": 0.2776418924331665,
      "learning_rate": 2e-05,
      "loss": 0.0238,
      "mean_token_accuracy": 0.9948301285505294,
      "num_tokens": 2534185.0,
      "step": 860
    },
    {
      "entropy": 0.026715041836723685,
      "epoch": 3.8157894736842106,
      "grad_norm": 0.40782320499420166,
      "learning_rate": 2e-05,
      "loss": 0.0263,
      "mean_token_accuracy": 0.9943931043148041,
      "num_tokens": 2563632.0,
      "step": 870
    },
    {
      "entropy": 0.026448413264006376,
      "epoch": 3.8596491228070176,
      "grad_norm": 0.20839615166187286,
      "learning_rate": 2e-05,
      "loss": 0.0253,
      "mean_token_accuracy": 0.9949382901191711,
      "num_tokens": 2593079.0,
      "step": 880
    },
    {
      "entropy": 0.026889733923599123,
      "epoch": 3.9035087719298245,
      "grad_norm": 0.4022772014141083,
      "learning_rate": 2e-05,
      "loss": 0.0238,
      "mean_token_accuracy": 0.9950519934296608,
      "num_tokens": 2622599.0,
      "step": 890
    },
    {
      "entropy": 0.02558579680044204,
      "epoch": 3.9473684210526314,
      "grad_norm": 0.23025710880756378,
      "learning_rate": 2e-05,
      "loss": 0.0238,
      "mean_token_accuracy": 0.9947320595383644,
      "num_tokens": 2652027.0,
      "step": 900
    },
    {
      "entropy": 0.026929323840886355,
      "epoch": 3.9912280701754383,
      "grad_norm": 0.558501660823822,
      "learning_rate": 2e-05,
      "loss": 0.0263,
      "mean_token_accuracy": 0.9945844635367393,
      "num_tokens": 2681568.0,
      "step": 910
    }
  ],
  "logging_steps": 10,
  "max_steps": 1824,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1678966011334656.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}