{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984871406959153,
  "eval_steps": 100,
  "global_step": 330,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.030257186081694403,
      "grad_norm": 48.18317253284181,
      "learning_rate": 6.060606060606061e-06,
      "loss": 7.4023,
      "mean_token_accuracy": 0.13559286370873452,
      "step": 10
    },
    {
      "epoch": 0.060514372163388806,
      "grad_norm": 25.86717975049105,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 6.8148,
      "mean_token_accuracy": 0.14477298334240912,
      "step": 20
    },
    {
      "epoch": 0.0907715582450832,
      "grad_norm": 9.978340525429944,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 5.6102,
      "mean_token_accuracy": 0.17600756287574768,
      "step": 30
    },
    {
      "epoch": 0.12102874432677761,
      "grad_norm": 4.525649385270933,
      "learning_rate": 1.9972599751485225e-05,
      "loss": 4.6477,
      "mean_token_accuracy": 0.2445007838308811,
      "step": 40
    },
    {
      "epoch": 0.15128593040847202,
      "grad_norm": 3.175409164823092,
      "learning_rate": 1.9838755799290993e-05,
      "loss": 4.0312,
      "mean_token_accuracy": 0.30081592202186586,
      "step": 50
    },
    {
      "epoch": 0.1815431164901664,
      "grad_norm": 2.010787736128858,
      "learning_rate": 1.9594929736144978e-05,
      "loss": 3.6578,
      "mean_token_accuracy": 0.33934821784496305,
      "step": 60
    },
    {
      "epoch": 0.2118003025718608,
      "grad_norm": 1.1782901971905857,
      "learning_rate": 1.9243847161266924e-05,
      "loss": 3.3934,
      "mean_token_accuracy": 0.371051961183548,
      "step": 70
    },
    {
      "epoch": 0.24205748865355523,
      "grad_norm": 0.9889630793327328,
      "learning_rate": 1.8789432636206197e-05,
      "loss": 3.3324,
      "mean_token_accuracy": 0.378506475687027,
      "step": 80
    },
    {
      "epoch": 0.2723146747352496,
      "grad_norm": 0.8299178994463533,
      "learning_rate": 1.8236765814298328e-05,
      "loss": 3.2738,
      "mean_token_accuracy": 0.38666853606700896,
      "step": 90
    },
    {
      "epoch": 0.30257186081694404,
      "grad_norm": 0.8704351270978057,
      "learning_rate": 1.7592024657977432e-05,
      "loss": 3.282,
      "mean_token_accuracy": 0.38509487807750703,
      "step": 100
    },
    {
      "epoch": 0.30257186081694404,
      "eval_runtime": 0.225,
      "eval_samples_per_second": 213.326,
      "eval_steps_per_second": 13.333,
      "step": 100
    },
    {
      "epoch": 0.3328290468986384,
      "grad_norm": 0.7957814410722591,
      "learning_rate": 1.686241637868734e-05,
      "loss": 3.2516,
      "mean_token_accuracy": 0.39028556644916534,
      "step": 110
    },
    {
      "epoch": 0.3630862329803328,
      "grad_norm": 0.7305776403219497,
      "learning_rate": 1.6056096871376667e-05,
      "loss": 3.2766,
      "mean_token_accuracy": 0.38682268708944323,
      "step": 120
    },
    {
      "epoch": 0.39334341906202724,
      "grad_norm": 0.7140945050608877,
      "learning_rate": 1.5182079544175957e-05,
      "loss": 3.25,
      "mean_token_accuracy": 0.39047552198171614,
      "step": 130
    },
    {
      "epoch": 0.4236006051437216,
      "grad_norm": 0.7281100436036338,
      "learning_rate": 1.4250134562400301e-05,
      "loss": 3.2254,
      "mean_token_accuracy": 0.39294967502355577,
      "step": 140
    },
    {
      "epoch": 0.45385779122541603,
      "grad_norm": 0.7678124673120136,
      "learning_rate": 1.3270679633174219e-05,
      "loss": 3.2156,
      "mean_token_accuracy": 0.39464686065912247,
      "step": 150
    },
    {
      "epoch": 0.48411497730711045,
      "grad_norm": 0.7134325713906087,
      "learning_rate": 1.2254663551538047e-05,
      "loss": 3.1895,
      "mean_token_accuracy": 0.39698781073093414,
      "step": 160
    },
    {
      "epoch": 0.5143721633888049,
      "grad_norm": 0.723513310530251,
      "learning_rate": 1.121344380981082e-05,
      "loss": 3.2254,
      "mean_token_accuracy": 0.39353512674570085,
      "step": 170
    },
    {
      "epoch": 0.5446293494704992,
      "grad_norm": 0.7185772760297996,
      "learning_rate": 1.015865963834808e-05,
      "loss": 3.1977,
      "mean_token_accuracy": 0.39701825082302095,
      "step": 180
    },
    {
      "epoch": 0.5748865355521936,
      "grad_norm": 0.6834580433094664,
      "learning_rate": 9.102101896903084e-06,
      "loss": 3.2086,
      "mean_token_accuracy": 0.39654194712638857,
      "step": 190
    },
    {
      "epoch": 0.6051437216338881,
      "grad_norm": 0.7155622236506424,
      "learning_rate": 8.055581271005292e-06,
      "loss": 3.2094,
      "mean_token_accuracy": 0.3948154032230377,
      "step": 200
    },
    {
      "epoch": 0.6051437216338881,
      "eval_runtime": 0.2213,
      "eval_samples_per_second": 216.915,
      "eval_steps_per_second": 13.557,
      "step": 200
    },
    {
      "epoch": 0.6354009077155824,
      "grad_norm": 0.6809195871075517,
      "learning_rate": 7.0307962467172555e-06,
      "loss": 3.1988,
      "mean_token_accuracy": 0.3979633778333664,
      "step": 210
    },
    {
      "epoch": 0.6656580937972768,
      "grad_norm": 0.707315821198318,
      "learning_rate": 6.039202339608432e-06,
      "loss": 3.202,
      "mean_token_accuracy": 0.3954067021608353,
      "step": 220
    },
    {
      "epoch": 0.6959152798789713,
      "grad_norm": 0.6904494791556431,
      "learning_rate": 5.091884039764321e-06,
      "loss": 3.1965,
      "mean_token_accuracy": 0.39610737562179565,
      "step": 230
    },
    {
      "epoch": 0.7261724659606656,
      "grad_norm": 0.6699258512413571,
      "learning_rate": 4.19943090428802e-06,
      "loss": 3.2105,
      "mean_token_accuracy": 0.39350710064172745,
      "step": 240
    },
    {
      "epoch": 0.75642965204236,
      "grad_norm": 0.74188228093062,
      "learning_rate": 3.37181918238904e-06,
      "loss": 3.2074,
      "mean_token_accuracy": 0.39521674066782,
      "step": 250
    },
    {
      "epoch": 0.7866868381240545,
      "grad_norm": 0.6872378478922624,
      "learning_rate": 2.618300296308135e-06,
      "loss": 3.2215,
      "mean_token_accuracy": 0.3938901349902153,
      "step": 260
    },
    {
      "epoch": 0.8169440242057489,
      "grad_norm": 0.7136188203818471,
      "learning_rate": 1.947297424689414e-06,
      "loss": 3.1988,
      "mean_token_accuracy": 0.39579751938581464,
      "step": 270
    },
    {
      "epoch": 0.8472012102874432,
      "grad_norm": 0.6657712543261404,
      "learning_rate": 1.3663113444380905e-06,
      "loss": 3.202,
      "mean_token_accuracy": 0.3932719826698303,
      "step": 280
    },
    {
      "epoch": 0.8774583963691377,
      "grad_norm": 0.72900722083696,
      "learning_rate": 8.818365836066101e-07,
      "loss": 3.1625,
      "mean_token_accuracy": 0.4012471452355385,
      "step": 290
    },
    {
      "epoch": 0.9077155824508321,
      "grad_norm": 0.6725834304571868,
      "learning_rate": 4.992888225905467e-07,
      "loss": 3.2246,
      "mean_token_accuracy": 0.3945300817489624,
      "step": 300
    },
    {
      "epoch": 0.9077155824508321,
      "eval_runtime": 0.2212,
      "eval_samples_per_second": 217.045,
      "eval_steps_per_second": 13.565,
      "step": 300
    },
    {
      "epoch": 0.9379727685325264,
      "grad_norm": 0.7015653789891463,
      "learning_rate": 2.2294435517691504e-07,
      "loss": 3.1926,
      "mean_token_accuracy": 0.3973464578390121,
      "step": 310
    },
    {
      "epoch": 0.9682299546142209,
      "grad_norm": 0.6744365913859031,
      "learning_rate": 5.5892286176932875e-08,
      "loss": 3.1965,
      "mean_token_accuracy": 0.39635206162929537,
      "step": 320
    },
    {
      "epoch": 0.9984871406959153,
      "grad_norm": 0.6617082977205768,
      "learning_rate": 0.0,
      "loss": 3.2031,
      "mean_token_accuracy": 0.3948866441845894,
      "step": 330
    },
    {
      "epoch": 0.9984871406959153,
      "step": 330,
      "total_flos": 5419400756723712.0,
      "train_loss": 3.615518465909091,
      "train_runtime": 337.9584,
      "train_samples_per_second": 31.279,
      "train_steps_per_second": 0.976
    }
  ],
  "logging_steps": 10,
  "max_steps": 330,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5419400756723712.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}