{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 229,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.39035615921020506,
      "epoch": 0.04381161007667032,
      "grad_norm": 19.073434829711914,
      "learning_rate": 2e-05,
      "loss": 4.3018,
      "mean_token_accuracy": 0.5682853847742081,
      "num_tokens": 29485.0,
      "step": 10
    },
    {
      "entropy": 0.7484827220439911,
      "epoch": 0.08762322015334063,
      "grad_norm": 3.970487356185913,
      "learning_rate": 2e-05,
      "loss": 1.9588,
      "mean_token_accuracy": 0.6895880490541458,
      "num_tokens": 58977.0,
      "step": 20
    },
    {
      "entropy": 1.1801635682582856,
      "epoch": 0.13143483023001096,
      "grad_norm": 2.532576322555542,
      "learning_rate": 2e-05,
      "loss": 1.431,
      "mean_token_accuracy": 0.7343599200248718,
      "num_tokens": 88402.0,
      "step": 30
    },
    {
      "entropy": 1.1498825669288635,
      "epoch": 0.17524644030668127,
      "grad_norm": 1.982933521270752,
      "learning_rate": 2e-05,
      "loss": 1.1355,
      "mean_token_accuracy": 0.7867384225130081,
      "num_tokens": 117810.0,
      "step": 40
    },
    {
      "entropy": 0.8834485694766044,
      "epoch": 0.21905805038335158,
      "grad_norm": 1.8390016555786133,
      "learning_rate": 2e-05,
      "loss": 0.8543,
      "mean_token_accuracy": 0.8341205582022667,
      "num_tokens": 147327.0,
      "step": 50
    },
    {
      "entropy": 0.5984191231429576,
      "epoch": 0.2628696604600219,
      "grad_norm": 1.747591257095337,
      "learning_rate": 2e-05,
      "loss": 0.5852,
      "mean_token_accuracy": 0.8857637628912925,
      "num_tokens": 176791.0,
      "step": 60
    },
    {
      "entropy": 0.3483880817890167,
      "epoch": 0.3066812705366922,
      "grad_norm": 1.5721803903579712,
      "learning_rate": 2e-05,
      "loss": 0.3545,
      "mean_token_accuracy": 0.9292098119854927,
      "num_tokens": 206271.0,
      "step": 70
    },
    {
      "entropy": 0.20057316161692143,
      "epoch": 0.35049288061336253,
      "grad_norm": 1.2334290742874146,
      "learning_rate": 2e-05,
      "loss": 0.1877,
      "mean_token_accuracy": 0.9710112065076828,
      "num_tokens": 235692.0,
      "step": 80
    },
    {
      "entropy": 0.12095212489366532,
      "epoch": 0.39430449069003287,
      "grad_norm": 0.8091076016426086,
      "learning_rate": 2e-05,
      "loss": 0.1073,
      "mean_token_accuracy": 0.9890251606702805,
      "num_tokens": 265133.0,
      "step": 90
    },
    {
      "entropy": 0.10107735879719257,
      "epoch": 0.43811610076670315,
      "grad_norm": 0.6297779679298401,
      "learning_rate": 2e-05,
      "loss": 0.085,
      "mean_token_accuracy": 0.9887343898415566,
      "num_tokens": 294684.0,
      "step": 100
    },
    {
      "entropy": 0.08415136393159628,
      "epoch": 0.4819277108433735,
      "grad_norm": 0.47038641571998596,
      "learning_rate": 2e-05,
      "loss": 0.0702,
      "mean_token_accuracy": 0.9906241714954376,
      "num_tokens": 324140.0,
      "step": 110
    },
    {
      "entropy": 0.07522545410320162,
      "epoch": 0.5257393209200438,
      "grad_norm": 0.44743451476097107,
      "learning_rate": 2e-05,
      "loss": 0.0682,
      "mean_token_accuracy": 0.9909884691238403,
      "num_tokens": 353646.0,
      "step": 120
    },
    {
      "entropy": 0.07190036196261644,
      "epoch": 0.5695509309967142,
      "grad_norm": 0.4326375722885132,
      "learning_rate": 2e-05,
      "loss": 0.0679,
      "mean_token_accuracy": 0.990379473567009,
      "num_tokens": 383164.0,
      "step": 130
    },
    {
      "entropy": 0.06936099929735065,
      "epoch": 0.6133625410733844,
      "grad_norm": 0.3178843557834625,
      "learning_rate": 2e-05,
      "loss": 0.0638,
      "mean_token_accuracy": 0.9907173082232476,
      "num_tokens": 412692.0,
      "step": 140
    },
    {
      "entropy": 0.06100328806787729,
      "epoch": 0.6571741511500547,
      "grad_norm": 0.3546409606933594,
      "learning_rate": 2e-05,
      "loss": 0.056,
      "mean_token_accuracy": 0.9925005912780762,
      "num_tokens": 442159.0,
      "step": 150
    },
    {
      "entropy": 0.060952140018343925,
      "epoch": 0.7009857612267251,
      "grad_norm": 0.34292343258857727,
      "learning_rate": 2e-05,
      "loss": 0.0551,
      "mean_token_accuracy": 0.991845327615738,
      "num_tokens": 471592.0,
      "step": 160
    },
    {
      "entropy": 0.06124872919172049,
      "epoch": 0.7447973713033954,
      "grad_norm": 0.3005734384059906,
      "learning_rate": 2e-05,
      "loss": 0.0552,
      "mean_token_accuracy": 0.9918943449854851,
      "num_tokens": 501090.0,
      "step": 170
    },
    {
      "entropy": 0.05876323413103819,
      "epoch": 0.7886089813800657,
      "grad_norm": 0.24807888269424438,
      "learning_rate": 2e-05,
      "loss": 0.0517,
      "mean_token_accuracy": 0.9916606426239014,
      "num_tokens": 530595.0,
      "step": 180
    },
    {
      "entropy": 0.058617806807160375,
      "epoch": 0.8324205914567361,
      "grad_norm": 0.4288617968559265,
      "learning_rate": 2e-05,
      "loss": 0.0572,
      "mean_token_accuracy": 0.9911757484078407,
      "num_tokens": 560059.0,
      "step": 190
    },
    {
      "entropy": 0.05736046200618148,
      "epoch": 0.8762322015334063,
      "grad_norm": 0.3320342004299164,
      "learning_rate": 2e-05,
      "loss": 0.05,
      "mean_token_accuracy": 0.9926259219646454,
      "num_tokens": 589375.0,
      "step": 200
    },
    {
      "entropy": 0.05618324866518378,
      "epoch": 0.9200438116100766,
      "grad_norm": 0.33638137578964233,
      "learning_rate": 2e-05,
      "loss": 0.0525,
      "mean_token_accuracy": 0.9919258087873459,
      "num_tokens": 618850.0,
      "step": 210
    },
    {
      "entropy": 0.05968485539779067,
      "epoch": 0.963855421686747,
      "grad_norm": 0.5651094317436218,
      "learning_rate": 2e-05,
      "loss": 0.052,
      "mean_token_accuracy": 0.9916668817400932,
      "num_tokens": 648391.0,
      "step": 220
    }
  ],
  "logging_steps": 10,
  "max_steps": 1832,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 420260660151552.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}