{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 10,
  "global_step": 134,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0149812734082397,
      "grad_norm": 6.645484924316406,
      "learning_rate": 1e-05,
      "loss": 1.6793,
      "mean_token_accuracy": 0.6077414005994797,
      "num_tokens": 7520.0,
      "step": 1
    },
    {
      "epoch": 0.0299625468164794,
      "grad_norm": 5.208061218261719,
      "learning_rate": 9.96268656716418e-06,
      "loss": 1.2004,
      "mean_token_accuracy": 0.7127931267023087,
      "num_tokens": 14631.0,
      "step": 2
    },
    {
      "epoch": 0.0449438202247191,
      "grad_norm": 4.237852573394775,
      "learning_rate": 9.925373134328359e-06,
      "loss": 1.3345,
      "mean_token_accuracy": 0.6681558042764664,
      "num_tokens": 21698.0,
      "step": 3
    },
    {
      "epoch": 0.0599250936329588,
      "grad_norm": 3.2547247409820557,
      "learning_rate": 9.888059701492538e-06,
      "loss": 1.0936,
      "mean_token_accuracy": 0.7124579399824142,
      "num_tokens": 29763.0,
      "step": 4
    },
    {
      "epoch": 0.0749063670411985,
      "grad_norm": 3.279022693634033,
      "learning_rate": 9.850746268656717e-06,
      "loss": 1.1485,
      "mean_token_accuracy": 0.6985869109630585,
      "num_tokens": 36572.0,
      "step": 5
    },
    {
      "epoch": 0.0898876404494382,
      "grad_norm": 3.9445199966430664,
      "learning_rate": 9.813432835820897e-06,
      "loss": 1.2735,
      "mean_token_accuracy": 0.6686816662549973,
      "num_tokens": 42969.0,
      "step": 6
    },
    {
      "epoch": 0.10486891385767791,
      "grad_norm": 3.1979117393493652,
      "learning_rate": 9.776119402985076e-06,
      "loss": 1.1807,
      "mean_token_accuracy": 0.6792618632316589,
      "num_tokens": 49628.0,
      "step": 7
    },
    {
      "epoch": 0.1198501872659176,
      "grad_norm": 2.979384183883667,
      "learning_rate": 9.738805970149255e-06,
      "loss": 1.1305,
      "mean_token_accuracy": 0.6916055679321289,
      "num_tokens": 57634.0,
      "step": 8
    },
    {
      "epoch": 0.1348314606741573,
      "grad_norm": 4.175307750701904,
      "learning_rate": 9.701492537313434e-06,
      "loss": 1.1587,
      "mean_token_accuracy": 0.6850023865699768,
      "num_tokens": 64493.0,
      "step": 9
    },
    {
      "epoch": 0.149812734082397,
      "grad_norm": 3.318324565887451,
      "learning_rate": 9.664179104477612e-06,
      "loss": 1.2182,
      "mean_token_accuracy": 0.666175588965416,
      "num_tokens": 71062.0,
      "step": 10
    },
    {
      "epoch": 0.149812734082397,
      "eval_loss": 1.1548303365707397,
      "eval_mean_token_accuracy": 0.6638695597648621,
      "eval_num_tokens": 71062.0,
      "eval_runtime": 43.4636,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 10
    },
    {
      "epoch": 0.1647940074906367,
      "grad_norm": 3.0939977169036865,
      "learning_rate": 9.626865671641792e-06,
      "loss": 1.0469,
      "mean_token_accuracy": 0.7054508179426193,
      "num_tokens": 78715.0,
      "step": 11
    },
    {
      "epoch": 0.1797752808988764,
      "grad_norm": 3.1718571186065674,
      "learning_rate": 9.589552238805972e-06,
      "loss": 0.9968,
      "mean_token_accuracy": 0.7284530699253082,
      "num_tokens": 86684.0,
      "step": 12
    },
    {
      "epoch": 0.1947565543071161,
      "grad_norm": 2.906012535095215,
      "learning_rate": 9.552238805970149e-06,
      "loss": 1.0074,
      "mean_token_accuracy": 0.7302294373512268,
      "num_tokens": 94023.0,
      "step": 13
    },
    {
      "epoch": 0.20973782771535582,
      "grad_norm": 3.084291458129883,
      "learning_rate": 9.51492537313433e-06,
      "loss": 1.096,
      "mean_token_accuracy": 0.6945489645004272,
      "num_tokens": 101018.0,
      "step": 14
    },
    {
      "epoch": 0.2247191011235955,
      "grad_norm": 3.168394088745117,
      "learning_rate": 9.477611940298507e-06,
      "loss": 1.1981,
      "mean_token_accuracy": 0.6750565618276596,
      "num_tokens": 108576.0,
      "step": 15
    },
    {
      "epoch": 0.2397003745318352,
      "grad_norm": 3.0310959815979004,
      "learning_rate": 9.440298507462688e-06,
      "loss": 1.2195,
      "mean_token_accuracy": 0.6774358600378036,
      "num_tokens": 116231.0,
      "step": 16
    },
    {
      "epoch": 0.2546816479400749,
      "grad_norm": 3.1885411739349365,
      "learning_rate": 9.402985074626867e-06,
      "loss": 1.1123,
      "mean_token_accuracy": 0.6851049065589905,
      "num_tokens": 122820.0,
      "step": 17
    },
    {
      "epoch": 0.2696629213483146,
      "grad_norm": 2.9225549697875977,
      "learning_rate": 9.365671641791045e-06,
      "loss": 1.1255,
      "mean_token_accuracy": 0.6796696484088898,
      "num_tokens": 130744.0,
      "step": 18
    },
    {
      "epoch": 0.2846441947565543,
      "grad_norm": 3.281294345855713,
      "learning_rate": 9.328358208955226e-06,
      "loss": 1.1157,
      "mean_token_accuracy": 0.6871359646320343,
      "num_tokens": 137441.0,
      "step": 19
    },
    {
      "epoch": 0.299625468164794,
      "grad_norm": 3.2192912101745605,
      "learning_rate": 9.291044776119403e-06,
      "loss": 1.3184,
      "mean_token_accuracy": 0.6449258774518967,
      "num_tokens": 143961.0,
      "step": 20
    },
    {
      "epoch": 0.299625468164794,
      "eval_loss": 1.0996215343475342,
      "eval_mean_token_accuracy": 0.6732441584269205,
      "eval_num_tokens": 143961.0,
      "eval_runtime": 43.4644,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 20
    },
    {
      "epoch": 0.3146067415730337,
      "grad_norm": 2.8447461128234863,
      "learning_rate": 9.253731343283582e-06,
      "loss": 0.8826,
      "mean_token_accuracy": 0.7349306046962738,
      "num_tokens": 150885.0,
      "step": 21
    },
    {
      "epoch": 0.3295880149812734,
      "grad_norm": 2.926057815551758,
      "learning_rate": 9.216417910447763e-06,
      "loss": 0.958,
      "mean_token_accuracy": 0.7313619703054428,
      "num_tokens": 157876.0,
      "step": 22
    },
    {
      "epoch": 0.3445692883895131,
      "grad_norm": 3.294577121734619,
      "learning_rate": 9.17910447761194e-06,
      "loss": 1.1452,
      "mean_token_accuracy": 0.6808282732963562,
      "num_tokens": 165203.0,
      "step": 23
    },
    {
      "epoch": 0.3595505617977528,
      "grad_norm": 3.026679515838623,
      "learning_rate": 9.14179104477612e-06,
      "loss": 1.0559,
      "mean_token_accuracy": 0.7152852565050125,
      "num_tokens": 171790.0,
      "step": 24
    },
    {
      "epoch": 0.37453183520599254,
      "grad_norm": 2.5701076984405518,
      "learning_rate": 9.104477611940299e-06,
      "loss": 0.8404,
      "mean_token_accuracy": 0.755279466509819,
      "num_tokens": 179560.0,
      "step": 25
    },
    {
      "epoch": 0.3895131086142322,
      "grad_norm": 2.930906295776367,
      "learning_rate": 9.067164179104478e-06,
      "loss": 1.0629,
      "mean_token_accuracy": 0.6979093104600906,
      "num_tokens": 187129.0,
      "step": 26
    },
    {
      "epoch": 0.4044943820224719,
      "grad_norm": 2.5857908725738525,
      "learning_rate": 9.029850746268657e-06,
      "loss": 0.9093,
      "mean_token_accuracy": 0.7412554323673248,
      "num_tokens": 194238.0,
      "step": 27
    },
    {
      "epoch": 0.41947565543071164,
      "grad_norm": 2.9247360229492188,
      "learning_rate": 8.992537313432836e-06,
      "loss": 1.0202,
      "mean_token_accuracy": 0.7101631760597229,
      "num_tokens": 201176.0,
      "step": 28
    },
    {
      "epoch": 0.4344569288389513,
      "grad_norm": 3.2126927375793457,
      "learning_rate": 8.955223880597016e-06,
      "loss": 1.1267,
      "mean_token_accuracy": 0.6963846385478973,
      "num_tokens": 207843.0,
      "step": 29
    },
    {
      "epoch": 0.449438202247191,
      "grad_norm": 2.6786251068115234,
      "learning_rate": 8.917910447761195e-06,
      "loss": 0.9444,
      "mean_token_accuracy": 0.7381020188331604,
      "num_tokens": 215058.0,
      "step": 30
    },
    {
      "epoch": 0.449438202247191,
      "eval_loss": 1.0702282190322876,
      "eval_mean_token_accuracy": 0.6778951783974966,
      "eval_num_tokens": 215058.0,
      "eval_runtime": 43.4996,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 30
    },
    {
      "epoch": 0.46441947565543074,
      "grad_norm": 3.422229528427124,
      "learning_rate": 8.880597014925374e-06,
      "loss": 1.1104,
      "mean_token_accuracy": 0.6989456862211227,
      "num_tokens": 220989.0,
      "step": 31
    },
    {
      "epoch": 0.4794007490636704,
      "grad_norm": 2.9094724655151367,
      "learning_rate": 8.843283582089553e-06,
      "loss": 0.9101,
      "mean_token_accuracy": 0.7397958487272263,
      "num_tokens": 228235.0,
      "step": 32
    },
    {
      "epoch": 0.4943820224719101,
      "grad_norm": 2.905221700668335,
      "learning_rate": 8.805970149253732e-06,
      "loss": 0.9017,
      "mean_token_accuracy": 0.739780843257904,
      "num_tokens": 235497.0,
      "step": 33
    },
    {
      "epoch": 0.5093632958801498,
      "grad_norm": 2.7689316272735596,
      "learning_rate": 8.768656716417911e-06,
      "loss": 0.8806,
      "mean_token_accuracy": 0.7460598647594452,
      "num_tokens": 242951.0,
      "step": 34
    },
    {
      "epoch": 0.5243445692883895,
      "grad_norm": 2.711458683013916,
      "learning_rate": 8.73134328358209e-06,
      "loss": 0.9845,
      "mean_token_accuracy": 0.7096125036478043,
      "num_tokens": 250554.0,
      "step": 35
    },
    {
      "epoch": 0.5393258426966292,
      "grad_norm": 2.9433722496032715,
      "learning_rate": 8.69402985074627e-06,
      "loss": 0.9512,
      "mean_token_accuracy": 0.7236437946557999,
      "num_tokens": 258167.0,
      "step": 36
    },
    {
      "epoch": 0.5543071161048689,
      "grad_norm": 3.174933910369873,
      "learning_rate": 8.656716417910447e-06,
      "loss": 1.075,
      "mean_token_accuracy": 0.6945899575948715,
      "num_tokens": 265000.0,
      "step": 37
    },
    {
      "epoch": 0.5692883895131086,
      "grad_norm": 2.713667392730713,
      "learning_rate": 8.619402985074628e-06,
      "loss": 0.8271,
      "mean_token_accuracy": 0.7603205889463425,
      "num_tokens": 272757.0,
      "step": 38
    },
    {
      "epoch": 0.5842696629213483,
      "grad_norm": 2.6623470783233643,
      "learning_rate": 8.582089552238807e-06,
      "loss": 0.8481,
      "mean_token_accuracy": 0.7493215799331665,
      "num_tokens": 280232.0,
      "step": 39
    },
    {
      "epoch": 0.599250936329588,
      "grad_norm": 2.6669371128082275,
      "learning_rate": 8.544776119402986e-06,
      "loss": 1.0571,
      "mean_token_accuracy": 0.6984888315200806,
      "num_tokens": 287844.0,
      "step": 40
    },
    {
      "epoch": 0.599250936329588,
      "eval_loss": 1.0412267446517944,
      "eval_mean_token_accuracy": 0.6823226809501648,
      "eval_num_tokens": 287844.0,
      "eval_runtime": 43.4294,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 40
    },
    {
      "epoch": 0.6142322097378277,
      "grad_norm": 2.81078839302063,
      "learning_rate": 8.507462686567165e-06,
      "loss": 0.8895,
      "mean_token_accuracy": 0.7473254799842834,
      "num_tokens": 294955.0,
      "step": 41
    },
    {
      "epoch": 0.6292134831460674,
      "grad_norm": 2.718947649002075,
      "learning_rate": 8.470149253731343e-06,
      "loss": 0.9208,
      "mean_token_accuracy": 0.7264387309551239,
      "num_tokens": 302360.0,
      "step": 42
    },
    {
      "epoch": 0.6441947565543071,
      "grad_norm": 2.797668933868408,
      "learning_rate": 8.432835820895524e-06,
      "loss": 1.0173,
      "mean_token_accuracy": 0.7081942111253738,
      "num_tokens": 309893.0,
      "step": 43
    },
    {
      "epoch": 0.6591760299625468,
      "grad_norm": 3.0845787525177,
      "learning_rate": 8.395522388059703e-06,
      "loss": 1.0575,
      "mean_token_accuracy": 0.7010719627141953,
      "num_tokens": 316759.0,
      "step": 44
    },
    {
      "epoch": 0.6741573033707865,
      "grad_norm": 2.764462947845459,
      "learning_rate": 8.35820895522388e-06,
      "loss": 1.1272,
      "mean_token_accuracy": 0.6816393285989761,
      "num_tokens": 324600.0,
      "step": 45
    },
    {
      "epoch": 0.6891385767790262,
      "grad_norm": 2.4682984352111816,
      "learning_rate": 8.320895522388061e-06,
      "loss": 0.797,
      "mean_token_accuracy": 0.7550250142812729,
      "num_tokens": 331644.0,
      "step": 46
    },
    {
      "epoch": 0.704119850187266,
      "grad_norm": 2.796246290206909,
      "learning_rate": 8.283582089552239e-06,
      "loss": 1.0701,
      "mean_token_accuracy": 0.6996825039386749,
      "num_tokens": 338637.0,
      "step": 47
    },
    {
      "epoch": 0.7191011235955056,
      "grad_norm": 2.880173921585083,
      "learning_rate": 8.246268656716418e-06,
      "loss": 1.1005,
      "mean_token_accuracy": 0.6946816593408585,
      "num_tokens": 345963.0,
      "step": 48
    },
    {
      "epoch": 0.7340823970037453,
      "grad_norm": 2.7749922275543213,
      "learning_rate": 8.208955223880599e-06,
      "loss": 0.9123,
      "mean_token_accuracy": 0.7341801673173904,
      "num_tokens": 353032.0,
      "step": 49
    },
    {
      "epoch": 0.7490636704119851,
      "grad_norm": 2.716906785964966,
      "learning_rate": 8.171641791044776e-06,
      "loss": 0.9649,
      "mean_token_accuracy": 0.7190811187028885,
      "num_tokens": 360374.0,
      "step": 50
    },
    {
      "epoch": 0.7490636704119851,
      "eval_loss": 1.0224930047988892,
      "eval_mean_token_accuracy": 0.6871222953001658,
      "eval_num_tokens": 360374.0,
      "eval_runtime": 43.4776,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 50
    },
    {
      "epoch": 0.7640449438202247,
      "grad_norm": 2.576030731201172,
      "learning_rate": 8.134328358208955e-06,
      "loss": 0.9832,
      "mean_token_accuracy": 0.7258175909519196,
      "num_tokens": 367529.0,
      "step": 51
    },
    {
      "epoch": 0.7790262172284644,
      "grad_norm": 2.765681743621826,
      "learning_rate": 8.097014925373135e-06,
      "loss": 0.9807,
      "mean_token_accuracy": 0.7210909426212311,
      "num_tokens": 374472.0,
      "step": 52
    },
    {
      "epoch": 0.7940074906367042,
      "grad_norm": 2.5107247829437256,
      "learning_rate": 8.059701492537314e-06,
      "loss": 0.86,
      "mean_token_accuracy": 0.7453574240207672,
      "num_tokens": 382443.0,
      "step": 53
    },
    {
      "epoch": 0.8089887640449438,
      "grad_norm": 2.511554479598999,
      "learning_rate": 8.022388059701493e-06,
      "loss": 0.9156,
      "mean_token_accuracy": 0.7413896918296814,
      "num_tokens": 389998.0,
      "step": 54
    },
    {
      "epoch": 0.8239700374531835,
      "grad_norm": 2.9105887413024902,
      "learning_rate": 7.985074626865672e-06,
      "loss": 0.8634,
      "mean_token_accuracy": 0.7419312596321106,
      "num_tokens": 396698.0,
      "step": 55
    },
    {
      "epoch": 0.8389513108614233,
      "grad_norm": 2.4456405639648438,
      "learning_rate": 7.947761194029851e-06,
      "loss": 0.8547,
      "mean_token_accuracy": 0.7464274764060974,
      "num_tokens": 403984.0,
      "step": 56
    },
    {
      "epoch": 0.8539325842696629,
      "grad_norm": 3.117522716522217,
      "learning_rate": 7.91044776119403e-06,
      "loss": 1.0601,
      "mean_token_accuracy": 0.6881874799728394,
      "num_tokens": 411433.0,
      "step": 57
    },
    {
      "epoch": 0.8689138576779026,
      "grad_norm": 2.4795329570770264,
      "learning_rate": 7.87313432835821e-06,
      "loss": 0.8548,
      "mean_token_accuracy": 0.7407911419868469,
      "num_tokens": 418859.0,
      "step": 58
    },
    {
      "epoch": 0.8838951310861424,
      "grad_norm": 3.0380287170410156,
      "learning_rate": 7.835820895522389e-06,
      "loss": 1.0231,
      "mean_token_accuracy": 0.7012683600187302,
      "num_tokens": 425440.0,
      "step": 59
    },
    {
      "epoch": 0.898876404494382,
      "grad_norm": 2.596809148788452,
      "learning_rate": 7.798507462686568e-06,
      "loss": 0.9533,
      "mean_token_accuracy": 0.7234554290771484,
      "num_tokens": 432346.0,
      "step": 60
    },
    {
      "epoch": 0.898876404494382,
      "eval_loss": 1.0079383850097656,
      "eval_mean_token_accuracy": 0.6899242699146271,
      "eval_num_tokens": 432346.0,
      "eval_runtime": 43.4508,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 60
    },
    {
      "epoch": 0.9138576779026217,
      "grad_norm": 2.8112661838531494,
      "learning_rate": 7.761194029850747e-06,
      "loss": 0.9786,
      "mean_token_accuracy": 0.7173863798379898,
      "num_tokens": 440164.0,
      "step": 61
    },
    {
      "epoch": 0.9288389513108615,
      "grad_norm": 3.0009336471557617,
      "learning_rate": 7.723880597014926e-06,
      "loss": 1.1157,
      "mean_token_accuracy": 0.6876846700906754,
      "num_tokens": 446900.0,
      "step": 62
    },
    {
      "epoch": 0.9438202247191011,
      "grad_norm": 2.882216215133667,
      "learning_rate": 7.686567164179105e-06,
      "loss": 0.9868,
      "mean_token_accuracy": 0.7076680213212967,
      "num_tokens": 454043.0,
      "step": 63
    },
    {
      "epoch": 0.9588014981273408,
      "grad_norm": 2.567683219909668,
      "learning_rate": 7.649253731343284e-06,
      "loss": 0.8569,
      "mean_token_accuracy": 0.7410573810338974,
      "num_tokens": 461616.0,
      "step": 64
    },
    {
      "epoch": 0.9737827715355806,
      "grad_norm": 2.8576714992523193,
      "learning_rate": 7.611940298507463e-06,
      "loss": 1.0125,
      "mean_token_accuracy": 0.7113172262907028,
      "num_tokens": 469118.0,
      "step": 65
    },
    {
      "epoch": 0.9887640449438202,
      "grad_norm": 2.429183006286621,
      "learning_rate": 7.574626865671643e-06,
      "loss": 0.8176,
      "mean_token_accuracy": 0.7573917061090469,
      "num_tokens": 476582.0,
      "step": 66
    },
    {
      "epoch": 1.0,
      "grad_norm": 3.128382444381714,
      "learning_rate": 7.537313432835821e-06,
      "loss": 0.8825,
      "mean_token_accuracy": 0.7674418290456136,
      "num_tokens": 480934.0,
      "step": 67
    },
    {
      "epoch": 1.0149812734082397,
      "grad_norm": 2.555082321166992,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.6467,
      "mean_token_accuracy": 0.7994843870401382,
      "num_tokens": 488505.0,
      "step": 68
    },
    {
      "epoch": 1.0299625468164795,
      "grad_norm": 2.6702513694763184,
      "learning_rate": 7.46268656716418e-06,
      "loss": 0.6819,
      "mean_token_accuracy": 0.8006551265716553,
      "num_tokens": 495367.0,
      "step": 69
    },
    {
      "epoch": 1.0449438202247192,
      "grad_norm": 2.4463446140289307,
      "learning_rate": 7.4253731343283585e-06,
      "loss": 0.6766,
      "mean_token_accuracy": 0.7914196252822876,
      "num_tokens": 502801.0,
      "step": 70
    },
    {
      "epoch": 1.0449438202247192,
      "eval_loss": 1.000529408454895,
      "eval_mean_token_accuracy": 0.6923578282197317,
      "eval_num_tokens": 502801.0,
      "eval_runtime": 43.4456,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 70
    },
    {
      "epoch": 1.0599250936329587,
      "grad_norm": 2.083836793899536,
      "learning_rate": 7.3880597014925385e-06,
      "loss": 0.5576,
      "mean_token_accuracy": 0.830506294965744,
      "num_tokens": 509852.0,
      "step": 71
    },
    {
      "epoch": 1.0749063670411985,
      "grad_norm": 2.3526670932769775,
      "learning_rate": 7.350746268656717e-06,
      "loss": 0.6305,
      "mean_token_accuracy": 0.8095849454402924,
      "num_tokens": 517474.0,
      "step": 72
    },
    {
      "epoch": 1.0898876404494382,
      "grad_norm": 2.429677724838257,
      "learning_rate": 7.313432835820896e-06,
      "loss": 0.6376,
      "mean_token_accuracy": 0.8020687401294708,
      "num_tokens": 524332.0,
      "step": 73
    },
    {
      "epoch": 1.104868913857678,
      "grad_norm": 2.212477445602417,
      "learning_rate": 7.276119402985076e-06,
      "loss": 0.5534,
      "mean_token_accuracy": 0.8321953117847443,
      "num_tokens": 531739.0,
      "step": 74
    },
    {
      "epoch": 1.1198501872659177,
      "grad_norm": 2.5952513217926025,
      "learning_rate": 7.238805970149254e-06,
      "loss": 0.6081,
      "mean_token_accuracy": 0.8088606148958206,
      "num_tokens": 538384.0,
      "step": 75
    },
    {
      "epoch": 1.1348314606741572,
      "grad_norm": 2.4394075870513916,
      "learning_rate": 7.2014925373134335e-06,
      "loss": 0.5225,
      "mean_token_accuracy": 0.8409454524517059,
      "num_tokens": 546061.0,
      "step": 76
    },
    {
      "epoch": 1.149812734082397,
      "grad_norm": 2.3486266136169434,
      "learning_rate": 7.164179104477612e-06,
      "loss": 0.6461,
      "mean_token_accuracy": 0.8038217276334763,
      "num_tokens": 553550.0,
      "step": 77
    },
    {
      "epoch": 1.1647940074906367,
      "grad_norm": 2.2467193603515625,
      "learning_rate": 7.126865671641792e-06,
      "loss": 0.5553,
      "mean_token_accuracy": 0.8217796683311462,
      "num_tokens": 560708.0,
      "step": 78
    },
    {
      "epoch": 1.1797752808988764,
      "grad_norm": 2.407600164413452,
      "learning_rate": 7.089552238805971e-06,
      "loss": 0.5811,
      "mean_token_accuracy": 0.8181268125772476,
      "num_tokens": 567407.0,
      "step": 79
    },
    {
      "epoch": 1.1947565543071161,
      "grad_norm": 2.376070737838745,
      "learning_rate": 7.052238805970149e-06,
      "loss": 0.5261,
      "mean_token_accuracy": 0.8382349163293839,
      "num_tokens": 574718.0,
      "step": 80
    },
    {
      "epoch": 1.1947565543071161,
      "eval_loss": 1.0289438962936401,
      "eval_mean_token_accuracy": 0.6871420840422312,
      "eval_num_tokens": 574718.0,
      "eval_runtime": 43.4624,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 80
    },
    {
      "epoch": 1.2097378277153559,
      "grad_norm": 2.738510847091675,
      "learning_rate": 7.014925373134329e-06,
      "loss": 0.6841,
      "mean_token_accuracy": 0.7840229570865631,
      "num_tokens": 581276.0,
      "step": 81
    },
    {
      "epoch": 1.2247191011235956,
      "grad_norm": 2.3343069553375244,
      "learning_rate": 6.9776119402985076e-06,
      "loss": 0.5493,
      "mean_token_accuracy": 0.8304655849933624,
      "num_tokens": 588904.0,
      "step": 82
    },
    {
      "epoch": 1.2397003745318351,
      "grad_norm": 2.639784336090088,
      "learning_rate": 6.9402985074626876e-06,
      "loss": 0.6857,
      "mean_token_accuracy": 0.7973657697439194,
      "num_tokens": 595858.0,
      "step": 83
    },
    {
      "epoch": 1.2546816479400749,
      "grad_norm": 2.3760740756988525,
      "learning_rate": 6.902985074626867e-06,
      "loss": 0.5136,
      "mean_token_accuracy": 0.8422924876213074,
      "num_tokens": 602581.0,
      "step": 84
    },
    {
      "epoch": 1.2696629213483146,
      "grad_norm": 2.2667794227600098,
      "learning_rate": 6.865671641791045e-06,
      "loss": 0.5062,
      "mean_token_accuracy": 0.83640918135643,
      "num_tokens": 609750.0,
      "step": 85
    },
    {
      "epoch": 1.2846441947565543,
      "grad_norm": 2.4145731925964355,
      "learning_rate": 6.828358208955225e-06,
      "loss": 0.653,
      "mean_token_accuracy": 0.7934205830097198,
      "num_tokens": 617577.0,
      "step": 86
    },
    {
      "epoch": 1.299625468164794,
      "grad_norm": 2.5289885997772217,
      "learning_rate": 6.791044776119403e-06,
      "loss": 0.5714,
      "mean_token_accuracy": 0.8144723027944565,
      "num_tokens": 623717.0,
      "step": 87
    },
    {
      "epoch": 1.3146067415730336,
      "grad_norm": 2.1667280197143555,
      "learning_rate": 6.7537313432835825e-06,
      "loss": 0.5032,
      "mean_token_accuracy": 0.8375886827707291,
      "num_tokens": 631372.0,
      "step": 88
    },
    {
      "epoch": 1.3295880149812733,
      "grad_norm": 2.4753546714782715,
      "learning_rate": 6.7164179104477625e-06,
      "loss": 0.5357,
      "mean_token_accuracy": 0.8220501989126205,
      "num_tokens": 637531.0,
      "step": 89
    },
    {
      "epoch": 1.344569288389513,
      "grad_norm": 2.417269706726074,
      "learning_rate": 6.679104477611941e-06,
      "loss": 0.5053,
      "mean_token_accuracy": 0.8365453034639359,
      "num_tokens": 643953.0,
      "step": 90
    },
    {
      "epoch": 1.344569288389513,
      "eval_loss": 1.043993592262268,
      "eval_mean_token_accuracy": 0.6838832795619965,
      "eval_num_tokens": 643953.0,
      "eval_runtime": 43.4519,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 90
    },
    {
      "epoch": 1.3595505617977528,
      "grad_norm": 2.4452741146087646,
      "learning_rate": 6.64179104477612e-06,
      "loss": 0.5959,
      "mean_token_accuracy": 0.8205520063638687,
      "num_tokens": 650371.0,
      "step": 91
    },
    {
      "epoch": 1.3745318352059925,
      "grad_norm": 2.4637227058410645,
      "learning_rate": 6.604477611940298e-06,
      "loss": 0.609,
      "mean_token_accuracy": 0.8141357600688934,
      "num_tokens": 657701.0,
      "step": 92
    },
    {
      "epoch": 1.3895131086142323,
      "grad_norm": 2.5266008377075195,
      "learning_rate": 6.567164179104478e-06,
      "loss": 0.5932,
      "mean_token_accuracy": 0.8143609464168549,
      "num_tokens": 665322.0,
      "step": 93
    },
    {
      "epoch": 1.404494382022472,
      "grad_norm": 2.3636891841888428,
      "learning_rate": 6.5298507462686575e-06,
      "loss": 0.6289,
      "mean_token_accuracy": 0.8084951192140579,
      "num_tokens": 672852.0,
      "step": 94
    },
    {
      "epoch": 1.4194756554307117,
      "grad_norm": 2.303436756134033,
      "learning_rate": 6.492537313432837e-06,
      "loss": 0.4828,
      "mean_token_accuracy": 0.8466101735830307,
      "num_tokens": 679760.0,
      "step": 95
    },
    {
      "epoch": 1.4344569288389513,
      "grad_norm": 2.34370493888855,
      "learning_rate": 6.455223880597016e-06,
      "loss": 0.5763,
      "mean_token_accuracy": 0.8200996369123459,
      "num_tokens": 686791.0,
      "step": 96
    },
    {
      "epoch": 1.449438202247191,
      "grad_norm": 2.2648942470550537,
      "learning_rate": 6.417910447761194e-06,
      "loss": 0.4666,
      "mean_token_accuracy": 0.8406203389167786,
      "num_tokens": 693028.0,
      "step": 97
    },
    {
      "epoch": 1.4644194756554307,
      "grad_norm": 2.3541007041931152,
      "learning_rate": 6.380597014925374e-06,
      "loss": 0.5937,
      "mean_token_accuracy": 0.8213587552309036,
      "num_tokens": 701220.0,
      "step": 98
    },
    {
      "epoch": 1.4794007490636705,
      "grad_norm": 2.1910910606384277,
      "learning_rate": 6.343283582089553e-06,
      "loss": 0.5371,
      "mean_token_accuracy": 0.8270279318094254,
      "num_tokens": 709016.0,
      "step": 99
    },
    {
      "epoch": 1.49438202247191,
      "grad_norm": 2.215508460998535,
      "learning_rate": 6.3059701492537316e-06,
      "loss": 0.5577,
      "mean_token_accuracy": 0.8273791819810867,
      "num_tokens": 716500.0,
      "step": 100
    },
    {
      "epoch": 1.49438202247191,
      "eval_loss": 1.0362759828567505,
      "eval_mean_token_accuracy": 0.6868139902750651,
      "eval_num_tokens": 716500.0,
      "eval_runtime": 43.4907,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 100
    },
    {
      "epoch": 1.5093632958801497,
      "grad_norm": 2.368717908859253,
      "learning_rate": 6.2686567164179116e-06,
      "loss": 0.5906,
      "mean_token_accuracy": 0.817937821149826,
      "num_tokens": 723654.0,
      "step": 101
    },
    {
      "epoch": 1.5243445692883895,
      "grad_norm": 2.5242090225219727,
      "learning_rate": 6.23134328358209e-06,
      "loss": 0.6044,
      "mean_token_accuracy": 0.8119681477546692,
      "num_tokens": 730772.0,
      "step": 102
    },
    {
      "epoch": 1.5393258426966292,
      "grad_norm": 2.181792736053467,
      "learning_rate": 6.194029850746269e-06,
      "loss": 0.5103,
      "mean_token_accuracy": 0.8396165817975998,
      "num_tokens": 737800.0,
      "step": 103
    },
    {
      "epoch": 1.554307116104869,
      "grad_norm": 2.560577392578125,
      "learning_rate": 6.156716417910447e-06,
      "loss": 0.6276,
      "mean_token_accuracy": 0.8078210204839706,
      "num_tokens": 744874.0,
      "step": 104
    },
    {
      "epoch": 1.5692883895131087,
      "grad_norm": 2.102379322052002,
      "learning_rate": 6.119402985074627e-06,
      "loss": 0.4746,
      "mean_token_accuracy": 0.8518716990947723,
      "num_tokens": 752448.0,
      "step": 105
    },
    {
      "epoch": 1.5842696629213484,
      "grad_norm": 2.304199695587158,
      "learning_rate": 6.0820895522388065e-06,
      "loss": 0.5656,
      "mean_token_accuracy": 0.8216982781887054,
      "num_tokens": 759899.0,
      "step": 106
    },
    {
      "epoch": 1.5992509363295881,
      "grad_norm": 2.216642141342163,
      "learning_rate": 6.044776119402986e-06,
      "loss": 0.5291,
      "mean_token_accuracy": 0.8351717591285706,
      "num_tokens": 768016.0,
      "step": 107
    },
    {
      "epoch": 1.6142322097378277,
      "grad_norm": 2.435307264328003,
      "learning_rate": 6.007462686567165e-06,
      "loss": 0.6234,
      "mean_token_accuracy": 0.8072042167186737,
      "num_tokens": 775450.0,
      "step": 108
    },
    {
      "epoch": 1.6292134831460674,
      "grad_norm": 2.4394967555999756,
      "learning_rate": 5.970149253731343e-06,
      "loss": 0.5309,
      "mean_token_accuracy": 0.8372008353471756,
      "num_tokens": 782761.0,
      "step": 109
    },
    {
      "epoch": 1.6441947565543071,
      "grad_norm": 2.545595645904541,
      "learning_rate": 5.932835820895523e-06,
      "loss": 0.6327,
      "mean_token_accuracy": 0.8080395758152008,
      "num_tokens": 789777.0,
      "step": 110
    },
    {
      "epoch": 1.6441947565543071,
      "eval_loss": 1.0350403785705566,
      "eval_mean_token_accuracy": 0.6877939999103546,
      "eval_num_tokens": 789777.0,
      "eval_runtime": 43.499,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.138,
      "step": 110
    },
    {
      "epoch": 1.6591760299625467,
      "grad_norm": 2.6292037963867188,
      "learning_rate": 5.895522388059702e-06,
      "loss": 0.5986,
      "mean_token_accuracy": 0.8076180070638657,
      "num_tokens": 797195.0,
      "step": 111
    },
    {
      "epoch": 1.6741573033707864,
      "grad_norm": 2.416217803955078,
      "learning_rate": 5.858208955223881e-06,
      "loss": 0.5786,
      "mean_token_accuracy": 0.819035142660141,
      "num_tokens": 803750.0,
      "step": 112
    },
    {
      "epoch": 1.6891385767790261,
      "grad_norm": 2.511836528778076,
      "learning_rate": 5.820895522388061e-06,
      "loss": 0.6348,
      "mean_token_accuracy": 0.8027018457651138,
      "num_tokens": 811335.0,
      "step": 113
    },
    {
      "epoch": 1.7041198501872659,
      "grad_norm": 2.4908852577209473,
      "learning_rate": 5.783582089552239e-06,
      "loss": 0.7,
      "mean_token_accuracy": 0.789110392332077,
      "num_tokens": 818745.0,
      "step": 114
    },
    {
      "epoch": 1.7191011235955056,
      "grad_norm": 2.6129393577575684,
      "learning_rate": 5.746268656716418e-06,
      "loss": 0.5622,
      "mean_token_accuracy": 0.8225392550230026,
      "num_tokens": 826035.0,
      "step": 115
    },
    {
      "epoch": 1.7340823970037453,
      "grad_norm": 2.579789638519287,
      "learning_rate": 5.708955223880598e-06,
      "loss": 0.7188,
      "mean_token_accuracy": 0.7761909365653992,
      "num_tokens": 833714.0,
      "step": 116
    },
    {
      "epoch": 1.749063670411985,
      "grad_norm": 2.415377616882324,
      "learning_rate": 5.671641791044776e-06,
      "loss": 0.5946,
      "mean_token_accuracy": 0.8112916648387909,
      "num_tokens": 840530.0,
      "step": 117
    },
    {
      "epoch": 1.7640449438202248,
      "grad_norm": 2.317101001739502,
      "learning_rate": 5.6343283582089556e-06,
      "loss": 0.5669,
      "mean_token_accuracy": 0.8185659945011139,
      "num_tokens": 847520.0,
      "step": 118
    },
    {
      "epoch": 1.7790262172284645,
      "grad_norm": 2.222646474838257,
      "learning_rate": 5.597014925373134e-06,
      "loss": 0.5257,
      "mean_token_accuracy": 0.8318661004304886,
      "num_tokens": 854801.0,
      "step": 119
    },
    {
      "epoch": 1.7940074906367043,
      "grad_norm": 2.343552589416504,
      "learning_rate": 5.559701492537314e-06,
      "loss": 0.6078,
      "mean_token_accuracy": 0.8209575116634369,
| "num_tokens": 861837.0, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 1.7940074906367043, | |
| "eval_loss": 1.0262762308120728, | |
| "eval_mean_token_accuracy": 0.6882797876993815, | |
| "eval_num_tokens": 861837.0, | |
| "eval_runtime": 43.4358, | |
| "eval_samples_per_second": 0.253, | |
| "eval_steps_per_second": 0.138, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 1.8089887640449438, | |
| "grad_norm": 2.282057762145996, | |
| "learning_rate": 5.522388059701493e-06, | |
| "loss": 0.5822, | |
| "mean_token_accuracy": 0.8263915479183197, | |
| "num_tokens": 868898.0, | |
| "step": 121 | |
| }, | |
| { | |
| "epoch": 1.8239700374531835, | |
| "grad_norm": 2.2916383743286133, | |
| "learning_rate": 5.485074626865672e-06, | |
| "loss": 0.4774, | |
| "mean_token_accuracy": 0.8558280915021896, | |
| "num_tokens": 875927.0, | |
| "step": 122 | |
| }, | |
| { | |
| "epoch": 1.8389513108614233, | |
| "grad_norm": 2.3634865283966064, | |
| "learning_rate": 5.447761194029851e-06, | |
| "loss": 0.5833, | |
| "mean_token_accuracy": 0.8178814053535461, | |
| "num_tokens": 883203.0, | |
| "step": 123 | |
| }, | |
| { | |
| "epoch": 1.8539325842696628, | |
| "grad_norm": 2.382763624191284, | |
| "learning_rate": 5.41044776119403e-06, | |
| "loss": 0.6506, | |
| "mean_token_accuracy": 0.8108739107847214, | |
| "num_tokens": 891081.0, | |
| "step": 124 | |
| }, | |
| { | |
| "epoch": 1.8689138576779025, | |
| "grad_norm": 2.2218973636627197, | |
| "learning_rate": 5.37313432835821e-06, | |
| "loss": 0.5196, | |
| "mean_token_accuracy": 0.8333333134651184, | |
| "num_tokens": 899053.0, | |
| "step": 125 | |
| }, | |
| { | |
| "epoch": 1.8838951310861423, | |
| "grad_norm": 2.729238748550415, | |
| "learning_rate": 5.335820895522389e-06, | |
| "loss": 0.6499, | |
| "mean_token_accuracy": 0.8122645914554596, | |
| "num_tokens": 905078.0, | |
| "step": 126 | |
| }, | |
| { | |
| "epoch": 1.898876404494382, | |
| "grad_norm": 2.129035711288452, | |
| "learning_rate": 5.298507462686567e-06, | |
| "loss": 0.531, | |
| "mean_token_accuracy": 0.8345644921064377, | |
| "num_tokens": 912716.0, | |
| "step": 127 | |
| }, | |
| { | |
| "epoch": 1.9138576779026217, | |
| "grad_norm": 2.543597459793091, | |
| "learning_rate": 5.261194029850747e-06, | |
| "loss": 0.5738, | |
| "mean_token_accuracy": 0.8189967423677444, | |
| "num_tokens": 920417.0, | |
| "step": 128 | |
| }, | |
| { | |
| "epoch": 1.9288389513108615, | |
| "grad_norm": 2.3534562587738037, | |
| "learning_rate": 5.2238805970149255e-06, | |
| "loss": 0.5738, | |
| "mean_token_accuracy": 0.814308300614357, | |
| "num_tokens": 927855.0, | |
| "step": 129 | |
| }, | |
| { | |
| "epoch": 1.9438202247191012, | |
| "grad_norm": 2.3981590270996094, | |
| "learning_rate": 5.186567164179105e-06, | |
| "loss": 0.5595, | |
| "mean_token_accuracy": 0.8211537748575211, | |
| "num_tokens": 934918.0, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 1.9438202247191012, | |
| "eval_loss": 1.0272549390792847, | |
| "eval_mean_token_accuracy": 0.6856482028961182, | |
| "eval_num_tokens": 934918.0, | |
| "eval_runtime": 43.4636, | |
| "eval_samples_per_second": 0.253, | |
| "eval_steps_per_second": 0.138, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 1.958801498127341, | |
| "grad_norm": 2.955871820449829, | |
| "learning_rate": 5.149253731343285e-06, | |
| "loss": 0.6324, | |
| "mean_token_accuracy": 0.8120583146810532, | |
| "num_tokens": 941215.0, | |
| "step": 131 | |
| }, | |
| { | |
| "epoch": 1.9737827715355807, | |
| "grad_norm": 2.3583202362060547, | |
| "learning_rate": 5.111940298507463e-06, | |
| "loss": 0.6178, | |
| "mean_token_accuracy": 0.811441496014595, | |
| "num_tokens": 948780.0, | |
| "step": 132 | |
| }, | |
| { | |
| "epoch": 1.9887640449438202, | |
| "grad_norm": 2.0456173419952393, | |
| "learning_rate": 5.074626865671642e-06, | |
| "loss": 0.5412, | |
| "mean_token_accuracy": 0.8333507776260376, | |
| "num_tokens": 956929.0, | |
| "step": 133 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "grad_norm": 2.892012357711792, | |
| "learning_rate": 5.037313432835821e-06, | |
| "loss": 0.6559, | |
| "mean_token_accuracy": 0.8129192392031351, | |
| "num_tokens": 961868.0, | |
| "step": 134 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 268, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 4, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.886130817743872e+16, | |
| "train_batch_size": 2, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |