{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 13.695652173913043,
  "eval_steps": 500,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 1.7925595045089722,
      "learning_rate": 2e-05,
      "loss": 2.3441,
      "step": 1
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 1.6493580341339111,
      "learning_rate": 4e-05,
      "loss": 2.3631,
      "step": 2
    },
    {
      "epoch": 0.5217391304347826,
      "grad_norm": 1.601073980331421,
      "learning_rate": 6e-05,
      "loss": 2.2615,
      "step": 3
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 1.6346150636672974,
      "learning_rate": 8e-05,
      "loss": 2.3701,
      "step": 4
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 1.5244935750961304,
      "learning_rate": 0.0001,
      "loss": 1.9692,
      "step": 5
    },
    {
      "epoch": 1.0869565217391304,
      "grad_norm": 3.000169038772583,
      "learning_rate": 0.00012,
      "loss": 3.3233,
      "step": 6
    },
    {
      "epoch": 1.2608695652173914,
      "grad_norm": 1.265796184539795,
      "learning_rate": 0.00014,
      "loss": 1.4917,
      "step": 7
    },
    {
      "epoch": 1.434782608695652,
      "grad_norm": 1.1824357509613037,
      "learning_rate": 0.00016,
      "loss": 1.3508,
      "step": 8
    },
    {
      "epoch": 1.608695652173913,
      "grad_norm": 1.0549633502960205,
      "learning_rate": 0.00018,
      "loss": 0.7712,
      "step": 9
    },
    {
      "epoch": 1.7826086956521738,
      "grad_norm": 0.9444736242294312,
      "learning_rate": 0.0002,
      "loss": 0.6453,
      "step": 10
    },
    {
      "epoch": 1.9565217391304348,
      "grad_norm": 1.1795233488082886,
      "learning_rate": 0.0001998629534754574,
      "loss": 0.6652,
      "step": 11
    },
    {
      "epoch": 2.1739130434782608,
      "grad_norm": 1.0760518312454224,
      "learning_rate": 0.00019945218953682734,
      "loss": 0.3256,
      "step": 12
    },
    {
      "epoch": 2.3478260869565215,
      "grad_norm": 0.7688791155815125,
      "learning_rate": 0.00019876883405951377,
      "loss": 0.287,
      "step": 13
    },
    {
      "epoch": 2.5217391304347827,
      "grad_norm": 0.5655398368835449,
      "learning_rate": 0.00019781476007338058,
      "loss": 0.2031,
      "step": 14
    },
    {
      "epoch": 2.6956521739130435,
      "grad_norm": 0.5574740171432495,
      "learning_rate": 0.00019659258262890683,
      "loss": 0.1848,
      "step": 15
    },
    {
      "epoch": 2.869565217391304,
      "grad_norm": 0.409333199262619,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.1504,
      "step": 16
    },
    {
      "epoch": 3.0869565217391304,
      "grad_norm": 0.48374369740486145,
      "learning_rate": 0.00019335804264972018,
      "loss": 0.2207,
      "step": 17
    },
    {
      "epoch": 3.260869565217391,
      "grad_norm": 0.31125733256340027,
      "learning_rate": 0.0001913545457642601,
      "loss": 0.1039,
      "step": 18
    },
    {
      "epoch": 3.4347826086956523,
      "grad_norm": 0.24773700535297394,
      "learning_rate": 0.0001891006524188368,
      "loss": 0.0904,
      "step": 19
    },
    {
      "epoch": 3.608695652173913,
      "grad_norm": 0.4116332232952118,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.1124,
      "step": 20
    },
    {
      "epoch": 3.782608695652174,
      "grad_norm": 0.29519736766815186,
      "learning_rate": 0.00018386705679454242,
      "loss": 0.0816,
      "step": 21
    },
    {
      "epoch": 3.9565217391304346,
      "grad_norm": 0.48144784569740295,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.1117,
      "step": 22
    },
    {
      "epoch": 4.173913043478261,
      "grad_norm": 0.12347900867462158,
      "learning_rate": 0.0001777145961456971,
      "loss": 0.0595,
      "step": 23
    },
    {
      "epoch": 4.3478260869565215,
      "grad_norm": 0.345081627368927,
      "learning_rate": 0.00017431448254773944,
      "loss": 0.07,
      "step": 24
    },
    {
      "epoch": 4.521739130434782,
      "grad_norm": 0.16552160680294037,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.0497,
      "step": 25
    },
    {
      "epoch": 4.695652173913043,
      "grad_norm": 0.20713075995445251,
      "learning_rate": 0.00016691306063588583,
      "loss": 0.0495,
      "step": 26
    },
    {
      "epoch": 4.869565217391305,
      "grad_norm": 0.273133784532547,
      "learning_rate": 0.00016293203910498376,
      "loss": 0.0745,
      "step": 27
    },
    {
      "epoch": 5.086956521739131,
      "grad_norm": 0.3325965404510498,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.0893,
      "step": 28
    },
    {
      "epoch": 5.260869565217392,
      "grad_norm": 0.0951855406165123,
      "learning_rate": 0.00015446390350150273,
      "loss": 0.0319,
      "step": 29
    },
    {
      "epoch": 5.434782608695652,
      "grad_norm": 0.2029908150434494,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.0542,
      "step": 30
    },
    {
      "epoch": 5.608695652173913,
      "grad_norm": 0.220689058303833,
      "learning_rate": 0.00014539904997395468,
      "loss": 0.0512,
      "step": 31
    },
    {
      "epoch": 5.782608695652174,
      "grad_norm": 0.21392984688282013,
      "learning_rate": 0.00014067366430758004,
      "loss": 0.0446,
      "step": 32
    },
    {
      "epoch": 5.956521739130435,
      "grad_norm": 0.18461254239082336,
      "learning_rate": 0.00013583679495453,
      "loss": 0.0611,
      "step": 33
    },
    {
      "epoch": 6.173913043478261,
      "grad_norm": 2.2200334072113037,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.1033,
      "step": 34
    },
    {
      "epoch": 6.3478260869565215,
      "grad_norm": 0.11724428087472916,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.0358,
      "step": 35
    },
    {
      "epoch": 6.521739130434782,
      "grad_norm": 0.19415688514709473,
      "learning_rate": 0.00012079116908177593,
      "loss": 0.0391,
      "step": 36
    },
    {
      "epoch": 6.695652173913043,
      "grad_norm": 0.19208520650863647,
      "learning_rate": 0.0001156434465040231,
      "loss": 0.0454,
      "step": 37
    },
    {
      "epoch": 6.869565217391305,
      "grad_norm": 0.18963392078876495,
      "learning_rate": 0.00011045284632676536,
      "loss": 0.0438,
      "step": 38
    },
    {
      "epoch": 7.086956521739131,
      "grad_norm": 0.15887770056724548,
      "learning_rate": 0.0001052335956242944,
      "loss": 0.0529,
      "step": 39
    },
    {
      "epoch": 7.260869565217392,
      "grad_norm": 0.10326984524726868,
      "learning_rate": 0.0001,
      "loss": 0.0302,
      "step": 40
    },
    {
      "epoch": 7.434782608695652,
      "grad_norm": 0.10307444632053375,
      "learning_rate": 9.476640437570562e-05,
      "loss": 0.0389,
      "step": 41
    },
    {
      "epoch": 7.608695652173913,
      "grad_norm": 0.16696208715438843,
      "learning_rate": 8.954715367323468e-05,
      "loss": 0.0427,
      "step": 42
    },
    {
      "epoch": 7.782608695652174,
      "grad_norm": 0.12140149623155594,
      "learning_rate": 8.435655349597689e-05,
      "loss": 0.0324,
      "step": 43
    },
    {
      "epoch": 7.956521739130435,
      "grad_norm": 0.15214045345783234,
      "learning_rate": 7.920883091822408e-05,
      "loss": 0.0465,
      "step": 44
    },
    {
      "epoch": 8.173913043478262,
      "grad_norm": 0.1985018402338028,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.0366,
      "step": 45
    },
    {
      "epoch": 8.347826086956522,
      "grad_norm": 0.06822706758975983,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.0284,
      "step": 46
    },
    {
      "epoch": 8.521739130434783,
      "grad_norm": 0.07317953556776047,
      "learning_rate": 6.416320504546997e-05,
      "loss": 0.0358,
      "step": 47
    },
    {
      "epoch": 8.695652173913043,
      "grad_norm": 0.05046230927109718,
      "learning_rate": 5.9326335692419995e-05,
      "loss": 0.0284,
      "step": 48
    },
    {
      "epoch": 8.869565217391305,
      "grad_norm": 0.07171109318733215,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 0.0313,
      "step": 49
    },
    {
      "epoch": 9.08695652173913,
      "grad_norm": 0.09738242626190186,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.0381,
      "step": 50
    },
    {
      "epoch": 9.26086956521739,
      "grad_norm": 0.07053200155496597,
      "learning_rate": 4.5536096498497295e-05,
      "loss": 0.0293,
      "step": 51
    },
    {
      "epoch": 9.434782608695652,
      "grad_norm": 0.11508598923683167,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.0257,
      "step": 52
    },
    {
      "epoch": 9.608695652173914,
      "grad_norm": 0.06967729330062866,
      "learning_rate": 3.7067960895016275e-05,
      "loss": 0.0273,
      "step": 53
    },
    {
      "epoch": 9.782608695652174,
      "grad_norm": 0.06969133019447327,
      "learning_rate": 3.308693936411421e-05,
      "loss": 0.0327,
      "step": 54
    },
    {
      "epoch": 9.956521739130435,
      "grad_norm": 0.12203659862279892,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.0453,
      "step": 55
    },
    {
      "epoch": 11.173913043478262,
      "grad_norm": 0.12760570645332336,
      "learning_rate": 2.5685517452260567e-05,
      "loss": 0.0278,
      "step": 56
    },
    {
      "epoch": 11.347826086956522,
      "grad_norm": 0.06412295252084732,
      "learning_rate": 2.2285403854302912e-05,
      "loss": 0.0234,
      "step": 57
    },
    {
      "epoch": 11.521739130434783,
      "grad_norm": 0.11282885074615479,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.0332,
      "step": 58
    },
    {
      "epoch": 11.695652173913043,
      "grad_norm": 0.07060640305280685,
      "learning_rate": 1.6132943205457606e-05,
      "loss": 0.0286,
      "step": 59
    },
    {
      "epoch": 11.869565217391305,
      "grad_norm": 0.14442871510982513,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.0319,
      "step": 60
    },
    {
      "epoch": 12.08695652173913,
      "grad_norm": 0.17993809282779694,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 0.0532,
      "step": 61
    },
    {
      "epoch": 12.26086956521739,
      "grad_norm": 0.0468832366168499,
      "learning_rate": 8.645454235739903e-06,
      "loss": 0.0234,
      "step": 62
    },
    {
      "epoch": 12.434782608695652,
      "grad_norm": 0.09955104440450668,
      "learning_rate": 6.6419573502798374e-06,
      "loss": 0.0321,
      "step": 63
    },
    {
      "epoch": 12.608695652173914,
      "grad_norm": 0.1094796434044838,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.0315,
      "step": 64
    },
    {
      "epoch": 12.782608695652174,
      "grad_norm": 0.06971123814582825,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.0208,
      "step": 65
    },
    {
      "epoch": 12.956521739130435,
      "grad_norm": 0.21308307349681854,
      "learning_rate": 2.1852399266194314e-06,
      "loss": 0.0427,
      "step": 66
    },
    {
      "epoch": 13.173913043478262,
      "grad_norm": 0.06157653033733368,
      "learning_rate": 1.231165940486234e-06,
      "loss": 0.0258,
      "step": 67
    },
    {
      "epoch": 13.347826086956522,
      "grad_norm": 0.05918806418776512,
      "learning_rate": 5.478104631726711e-07,
      "loss": 0.0277,
      "step": 68
    },
    {
      "epoch": 13.521739130434783,
      "grad_norm": 0.08736610412597656,
      "learning_rate": 1.3704652454261668e-07,
      "loss": 0.024,
      "step": 69
    },
    {
      "epoch": 13.695652173913043,
      "grad_norm": 0.09696591645479202,
      "learning_rate": 0.0,
      "loss": 0.0337,
      "step": 70
    }
  ],
  "logging_steps": 1,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 14,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 393796424564736.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}