{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.962962962962963,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.805763495862484,
      "epoch": 0.07407407407407407,
      "grad_norm": 0.6806204319000244,
      "learning_rate": 8.032786885245902e-05,
      "loss": 1.5484,
      "mean_token_accuracy": 0.6665007689595223,
      "num_tokens": 408149.0,
      "step": 50
    },
    {
      "entropy": 0.41144788280129435,
      "epoch": 0.14814814814814814,
      "grad_norm": 0.38455039262771606,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.3321,
      "mean_token_accuracy": 0.9129975068569184,
      "num_tokens": 816230.0,
      "step": 100
    },
    {
      "entropy": 0.16323913749307395,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.29704713821411133,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.1447,
      "mean_token_accuracy": 0.9618216013908386,
      "num_tokens": 1224471.0,
      "step": 150
    },
    {
      "entropy": 0.1175146003998816,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.35487300157546997,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.1071,
      "mean_token_accuracy": 0.9733556269109249,
      "num_tokens": 1632497.0,
      "step": 200
    },
    {
      "entropy": 0.1009879010822624,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.17419321835041046,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.0925,
      "mean_token_accuracy": 0.9769376286864281,
      "num_tokens": 2041392.0,
      "step": 250
    },
    {
      "entropy": 0.09154447751119733,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.20543242990970612,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.0836,
      "mean_token_accuracy": 0.9787026332318782,
      "num_tokens": 2450311.0,
      "step": 300
    },
    {
      "entropy": 0.08632300381548702,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.17172595858573914,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.0812,
      "mean_token_accuracy": 0.9789653661847114,
      "num_tokens": 2858744.0,
      "step": 350
    },
    {
      "entropy": 0.08412999271415174,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.1447569578886032,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.0805,
      "mean_token_accuracy": 0.9786932443082332,
      "num_tokens": 3265542.0,
      "step": 400
    },
    {
      "entropy": 0.08065679710358381,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.19630704820156097,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.0773,
      "mean_token_accuracy": 0.9797722736001014,
      "num_tokens": 3674162.0,
      "step": 450
    },
    {
      "entropy": 0.07874332463368773,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.08524929732084274,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.0762,
      "mean_token_accuracy": 0.9801681047677994,
      "num_tokens": 4082734.0,
      "step": 500
    },
    {
      "entropy": 0.07778633100911975,
      "epoch": 0.8148148148148148,
      "grad_norm": 0.10848797112703323,
      "learning_rate": 8.552459335135381e-05,
      "loss": 0.0753,
      "mean_token_accuracy": 0.9801766823232174,
      "num_tokens": 4491115.0,
      "step": 550
    },
    {
      "entropy": 0.07791180345229805,
      "epoch": 0.8888888888888888,
      "grad_norm": 0.12547598779201508,
      "learning_rate": 8.259993435156559e-05,
      "loss": 0.0752,
      "mean_token_accuracy": 0.9802092918753624,
      "num_tokens": 4899794.0,
      "step": 600
    },
    {
      "entropy": 0.07790746555663645,
      "epoch": 0.9629629629629629,
      "grad_norm": 0.0992884486913681,
      "learning_rate": 7.946685410208296e-05,
      "loss": 0.0759,
      "mean_token_accuracy": 0.979848040342331,
      "num_tokens": 5307342.0,
      "step": 650
    },
    {
      "entropy": 0.07524958597496152,
      "epoch": 1.037037037037037,
      "grad_norm": 0.086652472615242,
      "learning_rate": 7.614538333345735e-05,
      "loss": 0.0731,
      "mean_token_accuracy": 0.9808213406801224,
      "num_tokens": 5716156.0,
      "step": 700
    },
    {
      "entropy": 0.07525249728001654,
      "epoch": 1.1111111111111112,
      "grad_norm": 0.13188883662223816,
      "learning_rate": 7.265675721386285e-05,
      "loss": 0.0728,
      "mean_token_accuracy": 0.9810096868872642,
      "num_tokens": 6123905.0,
      "step": 750
    },
    {
      "entropy": 0.07601501471363008,
      "epoch": 1.1851851851851851,
      "grad_norm": 0.0819055363535881,
      "learning_rate": 6.902327958623736e-05,
      "loss": 0.0736,
      "mean_token_accuracy": 0.9805573572218418,
      "num_tokens": 6532143.0,
      "step": 800
    },
    {
      "entropy": 0.07429057988338172,
      "epoch": 1.2592592592592593,
      "grad_norm": 0.09344803541898727,
      "learning_rate": 6.526818037306228e-05,
      "loss": 0.0727,
      "mean_token_accuracy": 0.9813062380254268,
      "num_tokens": 6940424.0,
      "step": 850
    },
    {
      "entropy": 0.07440330957062542,
      "epoch": 1.3333333333333333,
      "grad_norm": 0.11321987956762314,
      "learning_rate": 6.14154670604355e-05,
      "loss": 0.0725,
      "mean_token_accuracy": 0.9811275874078274,
      "num_tokens": 7348719.0,
      "step": 900
    },
    {
      "entropy": 0.07393304943107068,
      "epoch": 1.4074074074074074,
      "grad_norm": 0.087185338139534,
      "learning_rate": 5.7489771210944564e-05,
      "loss": 0.0726,
      "mean_token_accuracy": 0.9807305666804313,
      "num_tokens": 7756710.0,
      "step": 950
    },
    {
      "entropy": 0.07433672657236456,
      "epoch": 1.4814814814814814,
      "grad_norm": 0.10077723860740662,
      "learning_rate": 5.351619098663021e-05,
      "loss": 0.0726,
      "mean_token_accuracy": 0.9807634821534157,
      "num_tokens": 8165027.0,
      "step": 1000
    },
    {
      "entropy": 0.07262381819076837,
      "epoch": 1.5555555555555556,
      "grad_norm": 0.05774468928575516,
      "learning_rate": 4.952013068883795e-05,
      "loss": 0.0717,
      "mean_token_accuracy": 0.9813279174268246,
      "num_tokens": 8573428.0,
      "step": 1050
    },
    {
      "entropy": 0.07313465082086623,
      "epoch": 1.6296296296296298,
      "grad_norm": 0.156134694814682,
      "learning_rate": 4.5527138340828776e-05,
      "loss": 0.0717,
      "mean_token_accuracy": 0.9811334984004497,
      "num_tokens": 8981661.0,
      "step": 1100
    },
    {
      "entropy": 0.07226949028670787,
      "epoch": 1.7037037037037037,
      "grad_norm": 0.060666773468256,
      "learning_rate": 4.156274235153189e-05,
      "loss": 0.071,
      "mean_token_accuracy": 0.9813579262793064,
      "num_tokens": 9390026.0,
      "step": 1150
    },
    {
      "entropy": 0.07272680706344545,
      "epoch": 1.7777777777777777,
      "grad_norm": 0.10639354586601257,
      "learning_rate": 3.765228830469794e-05,
      "loss": 0.0711,
      "mean_token_accuracy": 0.981099860817194,
      "num_tokens": 9798433.0,
      "step": 1200
    },
    {
      "entropy": 0.07212639102712273,
      "epoch": 1.8518518518518519,
      "grad_norm": 0.06637933105230331,
      "learning_rate": 3.3820776916908857e-05,
      "loss": 0.0711,
      "mean_token_accuracy": 0.9812221045792103,
      "num_tokens": 10206854.0,
      "step": 1250
    },
    {
      "entropy": 0.07320461552590132,
      "epoch": 1.925925925925926,
      "grad_norm": 0.10055620223283768,
      "learning_rate": 3.0092704200428058e-05,
      "loss": 0.0719,
      "mean_token_accuracy": 0.9807316599786282,
      "num_tokens": 10615186.0,
      "step": 1300
    },
    {
      "entropy": 0.07290558220818638,
      "epoch": 2.0,
      "grad_norm": 0.0677887499332428,
      "learning_rate": 2.649190485277792e-05,
      "loss": 0.0718,
      "mean_token_accuracy": 0.9811730526387692,
      "num_tokens": 11023650.0,
      "step": 1350
    },
    {
      "entropy": 0.07251124914735556,
      "epoch": 2.074074074074074,
      "grad_norm": 0.11045810580253601,
      "learning_rate": 2.3041399874302905e-05,
      "loss": 0.0709,
      "mean_token_accuracy": 0.9809805656969547,
      "num_tokens": 11430802.0,
      "step": 1400
    },
    {
      "entropy": 0.07181317125447094,
      "epoch": 2.148148148148148,
      "grad_norm": 0.07149960100650787,
      "learning_rate": 1.976324938794482e-05,
      "loss": 0.0708,
      "mean_token_accuracy": 0.9813783176243305,
      "num_tokens": 11839809.0,
      "step": 1450
    },
    {
      "entropy": 0.0717768538929522,
      "epoch": 2.2222222222222223,
      "grad_norm": 0.05869750306010246,
      "learning_rate": 1.667841160219835e-05,
      "loss": 0.0707,
      "mean_token_accuracy": 0.9814658354222775,
      "num_tokens": 12248612.0,
      "step": 1500
    },
    {
      "entropy": 0.0715598820708692,
      "epoch": 2.2962962962962963,
      "grad_norm": 0.08496281504631042,
      "learning_rate": 1.3806608818939203e-05,
      "loss": 0.0705,
      "mean_token_accuracy": 0.9812778060138225,
      "num_tokens": 12657077.0,
      "step": 1550
    },
    {
      "entropy": 0.07179237512871622,
      "epoch": 2.3703703703703702,
      "grad_norm": 0.06790705770254135,
      "learning_rate": 1.1166201342777438e-05,
      "loss": 0.0709,
      "mean_token_accuracy": 0.9812556092441082,
      "num_tokens": 13064930.0,
      "step": 1600
    },
    {
      "entropy": 0.0720329173374921,
      "epoch": 2.4444444444444446,
      "grad_norm": 0.06569824367761612,
      "learning_rate": 8.774070098071668e-06,
      "loss": 0.0711,
      "mean_token_accuracy": 0.9811322076618672,
      "num_tokens": 13472953.0,
      "step": 1650
    },
    {
      "entropy": 0.07238605052232742,
      "epoch": 2.5185185185185186,
      "grad_norm": 0.05740037187933922,
      "learning_rate": 6.645508704069003e-06,
      "loss": 0.0712,
      "mean_token_accuracy": 0.9810698322951794,
      "num_tokens": 13881339.0,
      "step": 1700
    },
    {
      "entropy": 0.07082363245077432,
      "epoch": 2.5925925925925926,
      "grad_norm": 0.06362631171941757,
      "learning_rate": 4.794125698167262e-06,
      "loss": 0.0703,
      "mean_token_accuracy": 0.9813959409296512,
      "num_tokens": 14289567.0,
      "step": 1750
    },
    {
      "entropy": 0.07266209500841797,
      "epoch": 2.6666666666666665,
      "grad_norm": 0.05623815581202507,
      "learning_rate": 3.231757532415458e-06,
      "loss": 0.0714,
      "mean_token_accuracy": 0.9809911704063415,
      "num_tokens": 14697205.0,
      "step": 1800
    },
    {
      "entropy": 0.07201593144796789,
      "epoch": 2.7407407407407405,
      "grad_norm": 0.05804692208766937,
      "learning_rate": 1.9683928994924385e-06,
      "loss": 0.071,
      "mean_token_accuracy": 0.9810844567418099,
      "num_tokens": 15105573.0,
      "step": 1850
    },
    {
      "entropy": 0.07237200179137289,
      "epoch": 2.814814814814815,
      "grad_norm": 0.07103168219327927,
      "learning_rate": 1.0121088719706296e-06,
      "loss": 0.0715,
      "mean_token_accuracy": 0.9812616856396198,
      "num_tokens": 15514180.0,
      "step": 1900
    },
    {
      "entropy": 0.07117950812913477,
      "epoch": 2.888888888888889,
      "grad_norm": 0.062420960515737534,
      "learning_rate": 3.6901926314575894e-07,
      "loss": 0.0703,
      "mean_token_accuracy": 0.981419977247715,
      "num_tokens": 15922315.0,
      "step": 1950
    },
    {
      "entropy": 0.07066867646761238,
      "epoch": 2.962962962962963,
      "grad_norm": 0.05154326930642128,
      "learning_rate": 4.323553957759629e-08,
      "loss": 0.07,
      "mean_token_accuracy": 0.9817947860062123,
      "num_tokens": 16331178.0,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.821968917982372e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}