{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 456,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.3891168855130672,
      "epoch": 0.043859649122807015,
      "grad_norm": 18.536834716796875,
      "learning_rate": 2e-05,
      "loss": 4.281,
      "mean_token_accuracy": 0.5703265547752381,
      "num_tokens": 29513.0,
      "step": 10
    },
    {
      "entropy": 0.749189381301403,
      "epoch": 0.08771929824561403,
      "grad_norm": 3.8398051261901855,
      "learning_rate": 2e-05,
      "loss": 1.9625,
      "mean_token_accuracy": 0.6897866070270539,
      "num_tokens": 58932.0,
      "step": 20
    },
    {
      "entropy": 1.169826951622963,
      "epoch": 0.13157894736842105,
      "grad_norm": 2.5061917304992676,
      "learning_rate": 2e-05,
      "loss": 1.4255,
      "mean_token_accuracy": 0.7357227891683579,
      "num_tokens": 88352.0,
      "step": 30
    },
    {
      "entropy": 1.1334790736436844,
      "epoch": 0.17543859649122806,
      "grad_norm": 1.9473565816879272,
      "learning_rate": 2e-05,
      "loss": 1.1315,
      "mean_token_accuracy": 0.7862708762288093,
      "num_tokens": 117877.0,
      "step": 40
    },
    {
      "entropy": 0.8736076682806015,
      "epoch": 0.21929824561403508,
      "grad_norm": 1.7826578617095947,
      "learning_rate": 2e-05,
      "loss": 0.8549,
      "mean_token_accuracy": 0.8343304082751274,
      "num_tokens": 147392.0,
      "step": 50
    },
    {
      "entropy": 0.5955807730555535,
      "epoch": 0.2631578947368421,
      "grad_norm": 1.6632131338119507,
      "learning_rate": 2e-05,
      "loss": 0.58,
      "mean_token_accuracy": 0.8876266479492188,
      "num_tokens": 176728.0,
      "step": 60
    },
    {
      "entropy": 0.34382262006402015,
      "epoch": 0.30701754385964913,
      "grad_norm": 1.4843252897262573,
      "learning_rate": 2e-05,
      "loss": 0.3445,
      "mean_token_accuracy": 0.9318736225366593,
      "num_tokens": 206184.0,
      "step": 70
    },
    {
      "entropy": 0.19373956136405468,
      "epoch": 0.3508771929824561,
      "grad_norm": 0.904003918170929,
      "learning_rate": 2e-05,
      "loss": 0.189,
      "mean_token_accuracy": 0.9710342198610306,
      "num_tokens": 235688.0,
      "step": 80
    },
    {
      "entropy": 0.12052652426064014,
      "epoch": 0.39473684210526316,
      "grad_norm": 0.7406700849533081,
      "learning_rate": 2e-05,
      "loss": 0.1056,
      "mean_token_accuracy": 0.9886028110980988,
      "num_tokens": 265172.0,
      "step": 90
    },
    {
      "entropy": 0.0992697212845087,
      "epoch": 0.43859649122807015,
      "grad_norm": 0.606019139289856,
      "learning_rate": 2e-05,
      "loss": 0.0817,
      "mean_token_accuracy": 0.989163076877594,
      "num_tokens": 294687.0,
      "step": 100
    },
    {
      "entropy": 0.08367474023252726,
      "epoch": 0.4824561403508772,
      "grad_norm": 0.4705955386161804,
      "learning_rate": 2e-05,
      "loss": 0.0737,
      "mean_token_accuracy": 0.9905414953827858,
      "num_tokens": 324088.0,
      "step": 110
    },
    {
      "entropy": 0.07386345528066159,
      "epoch": 0.5263157894736842,
      "grad_norm": 0.4300910234451294,
      "learning_rate": 2e-05,
      "loss": 0.0631,
      "mean_token_accuracy": 0.9910139158368111,
      "num_tokens": 353582.0,
      "step": 120
    },
    {
      "entropy": 0.06888655042275786,
      "epoch": 0.5701754385964912,
      "grad_norm": 0.45082923769950867,
      "learning_rate": 2e-05,
      "loss": 0.0661,
      "mean_token_accuracy": 0.9910464301705361,
      "num_tokens": 383080.0,
      "step": 130
    },
    {
      "entropy": 0.06553246006369591,
      "epoch": 0.6140350877192983,
      "grad_norm": 0.4258216619491577,
      "learning_rate": 2e-05,
      "loss": 0.0615,
      "mean_token_accuracy": 0.9914803236722947,
      "num_tokens": 412559.0,
      "step": 140
    },
    {
      "entropy": 0.06352790119126439,
      "epoch": 0.6578947368421053,
      "grad_norm": 0.37360644340515137,
      "learning_rate": 2e-05,
      "loss": 0.0579,
      "mean_token_accuracy": 0.9916400715708733,
      "num_tokens": 442008.0,
      "step": 150
    },
    {
      "entropy": 0.060972847137600185,
      "epoch": 0.7017543859649122,
      "grad_norm": 0.3406882584095001,
      "learning_rate": 2e-05,
      "loss": 0.0576,
      "mean_token_accuracy": 0.9914845108985901,
      "num_tokens": 471495.0,
      "step": 160
    },
    {
      "entropy": 0.05891128294169903,
      "epoch": 0.7456140350877193,
      "grad_norm": 0.2854160964488983,
      "learning_rate": 2e-05,
      "loss": 0.057,
      "mean_token_accuracy": 0.9915445119142532,
      "num_tokens": 500954.0,
      "step": 170
    },
    {
      "entropy": 0.0579888011328876,
      "epoch": 0.7894736842105263,
      "grad_norm": 0.40168505907058716,
      "learning_rate": 2e-05,
      "loss": 0.0507,
      "mean_token_accuracy": 0.9923512250185013,
      "num_tokens": 530401.0,
      "step": 180
    },
    {
      "entropy": 0.0573225624859333,
      "epoch": 0.8333333333333334,
      "grad_norm": 0.4589792490005493,
      "learning_rate": 2e-05,
      "loss": 0.0561,
      "mean_token_accuracy": 0.9916056364774704,
      "num_tokens": 559940.0,
      "step": 190
    },
    {
      "entropy": 0.05970530286431312,
      "epoch": 0.8771929824561403,
      "grad_norm": 0.5275149941444397,
      "learning_rate": 2e-05,
      "loss": 0.0573,
      "mean_token_accuracy": 0.9915869951248169,
      "num_tokens": 589427.0,
      "step": 200
    },
    {
      "entropy": 0.059507152158766986,
      "epoch": 0.9210526315789473,
      "grad_norm": 0.2661688029766083,
      "learning_rate": 2e-05,
      "loss": 0.0515,
      "mean_token_accuracy": 0.9923170626163482,
      "num_tokens": 618838.0,
      "step": 210
    },
    {
      "entropy": 0.05687206219881773,
      "epoch": 0.9649122807017544,
      "grad_norm": 0.35179731249809265,
      "learning_rate": 2e-05,
      "loss": 0.0506,
      "mean_token_accuracy": 0.9918064430356026,
      "num_tokens": 648258.0,
      "step": 220
    },
    {
      "entropy": 0.058181710354983804,
      "epoch": 1.0087719298245614,
      "grad_norm": 0.3514145314693451,
      "learning_rate": 2e-05,
      "loss": 0.0522,
      "mean_token_accuracy": 0.9915239199995994,
      "num_tokens": 677779.0,
      "step": 230
    },
    {
      "entropy": 0.055358363036066296,
      "epoch": 1.0526315789473684,
      "grad_norm": 0.3630446493625641,
      "learning_rate": 2e-05,
      "loss": 0.0482,
      "mean_token_accuracy": 0.9922210231423378,
      "num_tokens": 707217.0,
      "step": 240
    },
    {
      "entropy": 0.0538893367163837,
      "epoch": 1.0964912280701755,
      "grad_norm": 0.3740084767341614,
      "learning_rate": 2e-05,
      "loss": 0.0448,
      "mean_token_accuracy": 0.9927210569381714,
      "num_tokens": 736641.0,
      "step": 250
    },
    {
      "entropy": 0.0544189871288836,
      "epoch": 1.1403508771929824,
      "grad_norm": 0.31057634949684143,
      "learning_rate": 2e-05,
      "loss": 0.0474,
      "mean_token_accuracy": 0.9922840282320976,
      "num_tokens": 766189.0,
      "step": 260
    },
    {
      "entropy": 0.053078283276408914,
      "epoch": 1.1842105263157894,
      "grad_norm": 0.21449895203113556,
      "learning_rate": 2e-05,
      "loss": 0.0468,
      "mean_token_accuracy": 0.9923206493258476,
      "num_tokens": 795639.0,
      "step": 270
    },
    {
      "entropy": 0.05415161233395338,
      "epoch": 1.2280701754385965,
      "grad_norm": 0.30692726373672485,
      "learning_rate": 2e-05,
      "loss": 0.0458,
      "mean_token_accuracy": 0.9922255620360374,
      "num_tokens": 825119.0,
      "step": 280
    },
    {
      "entropy": 0.05160839455202222,
      "epoch": 1.2719298245614035,
      "grad_norm": 0.3166206479072571,
      "learning_rate": 2e-05,
      "loss": 0.0446,
      "mean_token_accuracy": 0.9921553313732148,
      "num_tokens": 854581.0,
      "step": 290
    },
    {
      "entropy": 0.05075615206733346,
      "epoch": 1.3157894736842106,
      "grad_norm": 0.3627457618713379,
      "learning_rate": 2e-05,
      "loss": 0.0417,
      "mean_token_accuracy": 0.9924884453415871,
      "num_tokens": 884020.0,
      "step": 300
    },
    {
      "entropy": 0.0517747713252902,
      "epoch": 1.3596491228070176,
      "grad_norm": 0.37561649084091187,
      "learning_rate": 2e-05,
      "loss": 0.0435,
      "mean_token_accuracy": 0.992647610604763,
      "num_tokens": 913529.0,
      "step": 310
    },
    {
      "entropy": 0.05250948471948504,
      "epoch": 1.4035087719298245,
      "grad_norm": 0.42561790347099304,
      "learning_rate": 2e-05,
      "loss": 0.0431,
      "mean_token_accuracy": 0.9922929123044014,
      "num_tokens": 942986.0,
      "step": 320
    },
    {
      "entropy": 0.05172846736386418,
      "epoch": 1.4473684210526316,
      "grad_norm": 0.2577071785926819,
      "learning_rate": 2e-05,
      "loss": 0.041,
      "mean_token_accuracy": 0.9923748031258584,
      "num_tokens": 972495.0,
      "step": 330
    },
    {
      "entropy": 0.04749757144600153,
      "epoch": 1.4912280701754386,
      "grad_norm": 0.25072360038757324,
      "learning_rate": 2e-05,
      "loss": 0.0402,
      "mean_token_accuracy": 0.9924940422177315,
      "num_tokens": 1001947.0,
      "step": 340
    },
    {
      "entropy": 0.04762007407844067,
      "epoch": 1.5350877192982457,
      "grad_norm": 0.3604894280433655,
      "learning_rate": 2e-05,
      "loss": 0.0392,
      "mean_token_accuracy": 0.992351396381855,
      "num_tokens": 1031488.0,
      "step": 350
    },
    {
      "entropy": 0.047403684537857774,
      "epoch": 1.5789473684210527,
      "grad_norm": 0.2911185920238495,
      "learning_rate": 2e-05,
      "loss": 0.0381,
      "mean_token_accuracy": 0.9927886813879013,
      "num_tokens": 1060887.0,
      "step": 360
    },
    {
      "entropy": 0.04852928835898638,
      "epoch": 1.6228070175438596,
      "grad_norm": 0.2618354856967926,
      "learning_rate": 2e-05,
      "loss": 0.0385,
      "mean_token_accuracy": 0.9927341029047966,
      "num_tokens": 1090348.0,
      "step": 370
    },
    {
      "entropy": 0.04723916696384549,
      "epoch": 1.6666666666666665,
      "grad_norm": 0.26962020993232727,
      "learning_rate": 2e-05,
      "loss": 0.0383,
      "mean_token_accuracy": 0.9925345599651336,
      "num_tokens": 1119839.0,
      "step": 380
    },
    {
      "entropy": 0.046288730949163436,
      "epoch": 1.7105263157894737,
      "grad_norm": 0.4525667130947113,
      "learning_rate": 2e-05,
      "loss": 0.0352,
      "mean_token_accuracy": 0.9927756577730179,
      "num_tokens": 1149317.0,
      "step": 390
    },
    {
      "entropy": 0.04401880670338869,
      "epoch": 1.7543859649122808,
      "grad_norm": 0.23167090117931366,
      "learning_rate": 2e-05,
      "loss": 0.0354,
      "mean_token_accuracy": 0.9926382765173912,
      "num_tokens": 1178783.0,
      "step": 400
    },
    {
      "entropy": 0.04442735444754362,
      "epoch": 1.7982456140350878,
      "grad_norm": 0.2764274477958679,
      "learning_rate": 2e-05,
      "loss": 0.0375,
      "mean_token_accuracy": 0.9925318494439125,
      "num_tokens": 1208255.0,
      "step": 410
    },
    {
      "entropy": 0.046166476979851725,
      "epoch": 1.8421052631578947,
      "grad_norm": 0.3111913800239563,
      "learning_rate": 2e-05,
      "loss": 0.0385,
      "mean_token_accuracy": 0.992423489689827,
      "num_tokens": 1237708.0,
      "step": 420
    },
    {
      "entropy": 0.044908494455739856,
      "epoch": 1.8859649122807016,
      "grad_norm": 0.23237602412700653,
      "learning_rate": 2e-05,
      "loss": 0.0385,
      "mean_token_accuracy": 0.9935551881790161,
      "num_tokens": 1267186.0,
      "step": 430
    },
    {
      "entropy": 0.04562048772349954,
      "epoch": 1.9298245614035088,
      "grad_norm": 0.25153592228889465,
      "learning_rate": 2e-05,
      "loss": 0.033,
      "mean_token_accuracy": 0.9941498264670372,
      "num_tokens": 1296591.0,
      "step": 440
    },
    {
      "entropy": 0.044570739334449175,
      "epoch": 1.973684210526316,
      "grad_norm": 0.23802141845226288,
      "learning_rate": 2e-05,
      "loss": 0.033,
      "mean_token_accuracy": 0.9942585557699204,
      "num_tokens": 1326042.0,
      "step": 450
    }
  ],
  "logging_steps": 10,
  "max_steps": 1824,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 839483005667328.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}