{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 458,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.39035615921020506,
      "epoch": 0.04381161007667032,
      "grad_norm": 19.073434829711914,
      "learning_rate": 2e-05,
      "loss": 4.3018,
      "mean_token_accuracy": 0.5682853847742081,
      "num_tokens": 29485.0,
      "step": 10
    },
    {
      "entropy": 0.7484827220439911,
      "epoch": 0.08762322015334063,
      "grad_norm": 3.970487356185913,
      "learning_rate": 2e-05,
      "loss": 1.9588,
      "mean_token_accuracy": 0.6895880490541458,
      "num_tokens": 58977.0,
      "step": 20
    },
    {
      "entropy": 1.1801635682582856,
      "epoch": 0.13143483023001096,
      "grad_norm": 2.532576322555542,
      "learning_rate": 2e-05,
      "loss": 1.431,
      "mean_token_accuracy": 0.7343599200248718,
      "num_tokens": 88402.0,
      "step": 30
    },
    {
      "entropy": 1.1498825669288635,
      "epoch": 0.17524644030668127,
      "grad_norm": 1.982933521270752,
      "learning_rate": 2e-05,
      "loss": 1.1355,
      "mean_token_accuracy": 0.7867384225130081,
      "num_tokens": 117810.0,
      "step": 40
    },
    {
      "entropy": 0.8834485694766044,
      "epoch": 0.21905805038335158,
      "grad_norm": 1.8390016555786133,
      "learning_rate": 2e-05,
      "loss": 0.8543,
      "mean_token_accuracy": 0.8341205582022667,
      "num_tokens": 147327.0,
      "step": 50
    },
    {
      "entropy": 0.5984191231429576,
      "epoch": 0.2628696604600219,
      "grad_norm": 1.747591257095337,
      "learning_rate": 2e-05,
      "loss": 0.5852,
      "mean_token_accuracy": 0.8857637628912925,
      "num_tokens": 176791.0,
      "step": 60
    },
    {
      "entropy": 0.3483880817890167,
      "epoch": 0.3066812705366922,
      "grad_norm": 1.5721803903579712,
      "learning_rate": 2e-05,
      "loss": 0.3545,
      "mean_token_accuracy": 0.9292098119854927,
      "num_tokens": 206271.0,
      "step": 70
    },
    {
      "entropy": 0.20057316161692143,
      "epoch": 0.35049288061336253,
      "grad_norm": 1.2334290742874146,
      "learning_rate": 2e-05,
      "loss": 0.1877,
      "mean_token_accuracy": 0.9710112065076828,
      "num_tokens": 235692.0,
      "step": 80
    },
    {
      "entropy": 0.12095212489366532,
      "epoch": 0.39430449069003287,
      "grad_norm": 0.8091076016426086,
      "learning_rate": 2e-05,
      "loss": 0.1073,
      "mean_token_accuracy": 0.9890251606702805,
      "num_tokens": 265133.0,
      "step": 90
    },
    {
      "entropy": 0.10107735879719257,
      "epoch": 0.43811610076670315,
      "grad_norm": 0.6297779679298401,
      "learning_rate": 2e-05,
      "loss": 0.085,
      "mean_token_accuracy": 0.9887343898415566,
      "num_tokens": 294684.0,
      "step": 100
    },
    {
      "entropy": 0.08415136393159628,
      "epoch": 0.4819277108433735,
      "grad_norm": 0.47038641571998596,
      "learning_rate": 2e-05,
      "loss": 0.0702,
      "mean_token_accuracy": 0.9906241714954376,
      "num_tokens": 324140.0,
      "step": 110
    },
    {
      "entropy": 0.07522545410320162,
      "epoch": 0.5257393209200438,
      "grad_norm": 0.44743451476097107,
      "learning_rate": 2e-05,
      "loss": 0.0682,
      "mean_token_accuracy": 0.9909884691238403,
      "num_tokens": 353646.0,
      "step": 120
    },
    {
      "entropy": 0.07190036196261644,
      "epoch": 0.5695509309967142,
      "grad_norm": 0.4326375722885132,
      "learning_rate": 2e-05,
      "loss": 0.0679,
      "mean_token_accuracy": 0.990379473567009,
      "num_tokens": 383164.0,
      "step": 130
    },
    {
      "entropy": 0.06936099929735065,
      "epoch": 0.6133625410733844,
      "grad_norm": 0.3178843557834625,
      "learning_rate": 2e-05,
      "loss": 0.0638,
      "mean_token_accuracy": 0.9907173082232476,
      "num_tokens": 412692.0,
      "step": 140
    },
    {
      "entropy": 0.06100328806787729,
      "epoch": 0.6571741511500547,
      "grad_norm": 0.3546409606933594,
      "learning_rate": 2e-05,
      "loss": 0.056,
      "mean_token_accuracy": 0.9925005912780762,
      "num_tokens": 442159.0,
      "step": 150
    },
    {
      "entropy": 0.060952140018343925,
      "epoch": 0.7009857612267251,
      "grad_norm": 0.34292343258857727,
      "learning_rate": 2e-05,
      "loss": 0.0551,
      "mean_token_accuracy": 0.991845327615738,
      "num_tokens": 471592.0,
      "step": 160
    },
    {
      "entropy": 0.06124872919172049,
      "epoch": 0.7447973713033954,
      "grad_norm": 0.3005734384059906,
      "learning_rate": 2e-05,
      "loss": 0.0552,
      "mean_token_accuracy": 0.9918943449854851,
      "num_tokens": 501090.0,
      "step": 170
    },
    {
      "entropy": 0.05876323413103819,
      "epoch": 0.7886089813800657,
      "grad_norm": 0.24807888269424438,
      "learning_rate": 2e-05,
      "loss": 0.0517,
      "mean_token_accuracy": 0.9916606426239014,
      "num_tokens": 530595.0,
      "step": 180
    },
    {
      "entropy": 0.058617806807160375,
      "epoch": 0.8324205914567361,
      "grad_norm": 0.4288617968559265,
      "learning_rate": 2e-05,
      "loss": 0.0572,
      "mean_token_accuracy": 0.9911757484078407,
      "num_tokens": 560059.0,
      "step": 190
    },
    {
      "entropy": 0.05736046200618148,
      "epoch": 0.8762322015334063,
      "grad_norm": 0.3320342004299164,
      "learning_rate": 2e-05,
      "loss": 0.05,
      "mean_token_accuracy": 0.9926259219646454,
      "num_tokens": 589375.0,
      "step": 200
    },
    {
      "entropy": 0.05618324866518378,
      "epoch": 0.9200438116100766,
      "grad_norm": 0.33638137578964233,
      "learning_rate": 2e-05,
      "loss": 0.0525,
      "mean_token_accuracy": 0.9919258087873459,
      "num_tokens": 618850.0,
      "step": 210
    },
    {
      "entropy": 0.05968485539779067,
      "epoch": 0.963855421686747,
      "grad_norm": 0.5651094317436218,
      "learning_rate": 2e-05,
      "loss": 0.052,
      "mean_token_accuracy": 0.9916668817400932,
      "num_tokens": 648391.0,
      "step": 220
    },
    {
      "entropy": 0.060051426794883365,
      "epoch": 1.004381161007667,
      "grad_norm": 0.31502196192741394,
      "learning_rate": 2e-05,
      "loss": 0.0486,
      "mean_token_accuracy": 0.9919279153282577,
      "num_tokens": 675667.0,
      "step": 230
    },
    {
      "entropy": 0.05292966021224856,
      "epoch": 1.0481927710843373,
      "grad_norm": 0.40586429834365845,
      "learning_rate": 2e-05,
      "loss": 0.0475,
      "mean_token_accuracy": 0.9920881032943726,
      "num_tokens": 705116.0,
      "step": 240
    },
    {
      "entropy": 0.052853992022573945,
      "epoch": 1.0920043811610076,
      "grad_norm": 0.2613174021244049,
      "learning_rate": 2e-05,
      "loss": 0.045,
      "mean_token_accuracy": 0.9926601052284241,
      "num_tokens": 734545.0,
      "step": 250
    },
    {
      "entropy": 0.052618366107344626,
      "epoch": 1.135815991237678,
      "grad_norm": 0.3835960030555725,
      "learning_rate": 2e-05,
      "loss": 0.044,
      "mean_token_accuracy": 0.9924288704991341,
      "num_tokens": 764013.0,
      "step": 260
    },
    {
      "entropy": 0.052085249312222005,
      "epoch": 1.1796276013143483,
      "grad_norm": 0.35782596468925476,
      "learning_rate": 2e-05,
      "loss": 0.046,
      "mean_token_accuracy": 0.9919404909014702,
      "num_tokens": 793540.0,
      "step": 270
    },
    {
      "entropy": 0.05476817348971963,
      "epoch": 1.2234392113910186,
      "grad_norm": 0.30708274245262146,
      "learning_rate": 2e-05,
      "loss": 0.0474,
      "mean_token_accuracy": 0.9918309196829795,
      "num_tokens": 823042.0,
      "step": 280
    },
    {
      "entropy": 0.05322778979316354,
      "epoch": 1.267250821467689,
      "grad_norm": 0.2604309022426605,
      "learning_rate": 2e-05,
      "loss": 0.0424,
      "mean_token_accuracy": 0.9924165293574333,
      "num_tokens": 852457.0,
      "step": 290
    },
    {
      "entropy": 0.04979227380827069,
      "epoch": 1.3110624315443593,
      "grad_norm": 0.334300696849823,
      "learning_rate": 2e-05,
      "loss": 0.0404,
      "mean_token_accuracy": 0.9923597663640976,
      "num_tokens": 881910.0,
      "step": 300
    },
    {
      "entropy": 0.05080299507826567,
      "epoch": 1.3548740416210296,
      "grad_norm": 0.3369467854499817,
      "learning_rate": 2e-05,
      "loss": 0.0429,
      "mean_token_accuracy": 0.9918962031602859,
      "num_tokens": 911418.0,
      "step": 310
    },
    {
      "entropy": 0.052211628574877975,
      "epoch": 1.3986856516976998,
      "grad_norm": 0.49774906039237976,
      "learning_rate": 2e-05,
      "loss": 0.0442,
      "mean_token_accuracy": 0.9922201976180076,
      "num_tokens": 940867.0,
      "step": 320
    },
    {
      "entropy": 0.05023647788912058,
      "epoch": 1.44249726177437,
      "grad_norm": 0.3457210659980774,
      "learning_rate": 2e-05,
      "loss": 0.0406,
      "mean_token_accuracy": 0.9923922121524811,
      "num_tokens": 970331.0,
      "step": 330
    },
    {
      "entropy": 0.047794731613248584,
      "epoch": 1.4863088718510404,
      "grad_norm": 0.35947972536087036,
      "learning_rate": 2e-05,
      "loss": 0.0417,
      "mean_token_accuracy": 0.992406377196312,
      "num_tokens": 999841.0,
      "step": 340
    },
    {
      "entropy": 0.047655423637479544,
      "epoch": 1.5301204819277108,
      "grad_norm": 0.37163057923316956,
      "learning_rate": 2e-05,
      "loss": 0.0405,
      "mean_token_accuracy": 0.9928415760397911,
      "num_tokens": 1029329.0,
      "step": 350
    },
    {
      "entropy": 0.04932913850061595,
      "epoch": 1.5739320920043811,
      "grad_norm": 0.34155628085136414,
      "learning_rate": 2e-05,
      "loss": 0.0409,
      "mean_token_accuracy": 0.992286778986454,
      "num_tokens": 1058772.0,
      "step": 360
    },
    {
      "entropy": 0.05013784933835268,
      "epoch": 1.6177437020810514,
      "grad_norm": 0.3228084444999695,
      "learning_rate": 2e-05,
      "loss": 0.0385,
      "mean_token_accuracy": 0.9925702095031739,
      "num_tokens": 1088250.0,
      "step": 370
    },
    {
      "entropy": 0.04742059959098697,
      "epoch": 1.6615553121577218,
      "grad_norm": 0.2427096664905548,
      "learning_rate": 2e-05,
      "loss": 0.0348,
      "mean_token_accuracy": 0.9927995279431343,
      "num_tokens": 1117693.0,
      "step": 380
    },
    {
      "entropy": 0.04431099114008248,
      "epoch": 1.7053669222343921,
      "grad_norm": 0.2118023931980133,
      "learning_rate": 2e-05,
      "loss": 0.0349,
      "mean_token_accuracy": 0.9929380178451538,
      "num_tokens": 1147140.0,
      "step": 390
    },
    {
      "entropy": 0.04604467884637416,
      "epoch": 1.7491785323110625,
      "grad_norm": 0.25765758752822876,
      "learning_rate": 2e-05,
      "loss": 0.037,
      "mean_token_accuracy": 0.9926380544900895,
      "num_tokens": 1176644.0,
      "step": 400
    },
    {
      "entropy": 0.04647672027349472,
      "epoch": 1.7929901423877328,
      "grad_norm": 0.2914351522922516,
      "learning_rate": 2e-05,
      "loss": 0.0362,
      "mean_token_accuracy": 0.99278933852911,
      "num_tokens": 1206205.0,
      "step": 410
    },
    {
      "entropy": 0.04514645580202341,
      "epoch": 1.8368017524644031,
      "grad_norm": 0.23769572377204895,
      "learning_rate": 2e-05,
      "loss": 0.0375,
      "mean_token_accuracy": 0.992934164404869,
      "num_tokens": 1235651.0,
      "step": 420
    },
    {
      "entropy": 0.044267228711396454,
      "epoch": 1.8806133625410735,
      "grad_norm": 0.22526511549949646,
      "learning_rate": 2e-05,
      "loss": 0.0331,
      "mean_token_accuracy": 0.9943611547350883,
      "num_tokens": 1265105.0,
      "step": 430
    },
    {
      "entropy": 0.04463120717555284,
      "epoch": 1.9244249726177438,
      "grad_norm": 0.27114230394363403,
      "learning_rate": 2e-05,
      "loss": 0.0348,
      "mean_token_accuracy": 0.99424988925457,
      "num_tokens": 1294507.0,
      "step": 440
    },
    {
      "entropy": 0.045189128536731,
      "epoch": 1.9682365826944141,
      "grad_norm": 0.3721632957458496,
      "learning_rate": 2e-05,
      "loss": 0.0348,
      "mean_token_accuracy": 0.9939030453562736,
      "num_tokens": 1324016.0,
      "step": 450
    }
  ],
  "logging_steps": 10,
  "max_steps": 1832,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 840521320303104.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}