{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 12.695652173913043,
  "eval_steps": 500,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 6.764804363250732,
      "learning_rate": 2e-05,
      "loss": 2.5611,
      "step": 1
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 5.956302165985107,
      "learning_rate": 4e-05,
      "loss": 2.6708,
      "step": 2
    },
    {
      "epoch": 0.5217391304347826,
      "grad_norm": 5.123376369476318,
      "learning_rate": 6e-05,
      "loss": 2.5466,
      "step": 3
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 6.901731014251709,
      "learning_rate": 8e-05,
      "loss": 2.6007,
      "step": 4
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 7.963821887969971,
      "learning_rate": 0.0001,
      "loss": 2.4327,
      "step": 5
    },
    {
      "epoch": 1.0869565217391304,
      "grad_norm": 24.67555046081543,
      "learning_rate": 0.00012,
      "loss": 3.6948,
      "step": 6
    },
    {
      "epoch": 1.2608695652173914,
      "grad_norm": 11.961183547973633,
      "learning_rate": 0.00014,
      "loss": 1.8845,
      "step": 7
    },
    {
      "epoch": 1.434782608695652,
      "grad_norm": 7.313081741333008,
      "learning_rate": 0.00016,
      "loss": 1.6564,
      "step": 8
    },
    {
      "epoch": 1.608695652173913,
      "grad_norm": 7.329380512237549,
      "learning_rate": 0.00018,
      "loss": 1.2679,
      "step": 9
    },
    {
      "epoch": 1.7826086956521738,
      "grad_norm": 5.3990092277526855,
      "learning_rate": 0.0002,
      "loss": 1.1521,
      "step": 10
    },
    {
      "epoch": 1.9565217391304348,
      "grad_norm": 19.579421997070312,
      "learning_rate": 0.0001998629534754574,
      "loss": 1.3572,
      "step": 11
    },
    {
      "epoch": 2.1739130434782608,
      "grad_norm": 9.876869201660156,
      "learning_rate": 0.00019945218953682734,
      "loss": 0.6523,
      "step": 12
    },
    {
      "epoch": 2.3478260869565215,
      "grad_norm": 4.188257217407227,
      "learning_rate": 0.00019876883405951377,
      "loss": 0.5632,
      "step": 13
    },
    {
      "epoch": 2.5217391304347827,
      "grad_norm": 5.128059387207031,
      "learning_rate": 0.00019781476007338058,
      "loss": 0.4172,
      "step": 14
    },
    {
      "epoch": 2.6956521739130435,
      "grad_norm": 1.45033597946167,
      "learning_rate": 0.00019659258262890683,
      "loss": 0.3559,
      "step": 15
    },
    {
      "epoch": 2.869565217391304,
      "grad_norm": 4.084733963012695,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.2889,
      "step": 16
    },
    {
      "epoch": 3.0869565217391304,
      "grad_norm": 1.971590518951416,
      "learning_rate": 0.00019335804264972018,
      "loss": 0.3996,
      "step": 17
    },
    {
      "epoch": 3.260869565217391,
      "grad_norm": 0.9812415838241577,
      "learning_rate": 0.0001913545457642601,
      "loss": 0.1805,
      "step": 18
    },
    {
      "epoch": 3.4347826086956523,
      "grad_norm": 0.5682662129402161,
      "learning_rate": 0.0001891006524188368,
      "loss": 0.1469,
      "step": 19
    },
    {
      "epoch": 3.608695652173913,
      "grad_norm": 0.7722249627113342,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.16,
      "step": 20
    },
    {
      "epoch": 3.782608695652174,
      "grad_norm": 2.2477004528045654,
      "learning_rate": 0.00018386705679454242,
      "loss": 0.1266,
      "step": 21
    },
    {
      "epoch": 3.9565217391304346,
      "grad_norm": 1.3397961854934692,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.1924,
      "step": 22
    },
    {
      "epoch": 4.173913043478261,
      "grad_norm": 1.588268518447876,
      "learning_rate": 0.0001777145961456971,
      "loss": 0.0942,
      "step": 23
    },
    {
      "epoch": 4.3478260869565215,
      "grad_norm": 0.5260263085365295,
      "learning_rate": 0.00017431448254773944,
      "loss": 0.0984,
      "step": 24
    },
    {
      "epoch": 4.521739130434782,
      "grad_norm": 0.6609199643135071,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.0809,
      "step": 25
    },
    {
      "epoch": 4.695652173913043,
      "grad_norm": 0.3053271770477295,
      "learning_rate": 0.00016691306063588583,
      "loss": 0.0661,
      "step": 26
    },
    {
      "epoch": 4.869565217391305,
      "grad_norm": 0.39704304933547974,
      "learning_rate": 0.00016293203910498376,
      "loss": 0.0811,
      "step": 27
    },
    {
      "epoch": 5.086956521739131,
      "grad_norm": 1.004157543182373,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.1324,
      "step": 28
    },
    {
      "epoch": 5.260869565217392,
      "grad_norm": 0.3579670488834381,
      "learning_rate": 0.00015446390350150273,
      "loss": 0.0494,
      "step": 29
    },
    {
      "epoch": 5.434782608695652,
      "grad_norm": 0.2634657621383667,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.0637,
      "step": 30
    },
    {
      "epoch": 5.608695652173913,
      "grad_norm": 0.3780810534954071,
      "learning_rate": 0.00014539904997395468,
      "loss": 0.0603,
      "step": 31
    },
    {
      "epoch": 5.782608695652174,
      "grad_norm": 0.4811391532421112,
      "learning_rate": 0.00014067366430758004,
      "loss": 0.059,
      "step": 32
    },
    {
      "epoch": 5.956521739130435,
      "grad_norm": 0.4578228294849396,
      "learning_rate": 0.00013583679495453,
      "loss": 0.0881,
      "step": 33
    },
    {
      "epoch": 6.173913043478261,
      "grad_norm": 0.7382459044456482,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.0523,
      "step": 34
    },
    {
      "epoch": 6.3478260869565215,
      "grad_norm": 0.6557295322418213,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.042,
      "step": 35
    },
    {
      "epoch": 6.521739130434782,
      "grad_norm": 0.5470027327537537,
      "learning_rate": 0.00012079116908177593,
      "loss": 0.0415,
      "step": 36
    },
    {
      "epoch": 6.695652173913043,
      "grad_norm": 0.6779957413673401,
      "learning_rate": 0.0001156434465040231,
      "loss": 0.0563,
      "step": 37
    },
    {
      "epoch": 6.869565217391305,
      "grad_norm": 0.16265903413295746,
      "learning_rate": 0.00011045284632676536,
      "loss": 0.0416,
      "step": 38
    },
    {
      "epoch": 7.086956521739131,
      "grad_norm": 0.1718588024377823,
      "learning_rate": 0.0001052335956242944,
      "loss": 0.0563,
      "step": 39
    },
    {
      "epoch": 7.260869565217392,
      "grad_norm": 0.9964053630828857,
      "learning_rate": 0.0001,
      "loss": 0.0415,
      "step": 40
    },
    {
      "epoch": 7.434782608695652,
      "grad_norm": 1.2380071878433228,
      "learning_rate": 9.476640437570562e-05,
      "loss": 0.0486,
      "step": 41
    },
    {
      "epoch": 7.608695652173913,
      "grad_norm": 0.38603806495666504,
      "learning_rate": 8.954715367323468e-05,
      "loss": 0.0467,
      "step": 42
    },
    {
      "epoch": 7.782608695652174,
      "grad_norm": 0.2892161011695862,
      "learning_rate": 8.435655349597689e-05,
      "loss": 0.0298,
      "step": 43
    },
    {
      "epoch": 7.956521739130435,
      "grad_norm": 0.32837310433387756,
      "learning_rate": 7.920883091822408e-05,
      "loss": 0.0544,
      "step": 44
    },
    {
      "epoch": 8.173913043478262,
      "grad_norm": 0.12985481321811676,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.0347,
      "step": 45
    },
    {
      "epoch": 8.347826086956522,
      "grad_norm": 0.2492673248052597,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.0441,
      "step": 46
    },
    {
      "epoch": 8.521739130434783,
      "grad_norm": 2.073184013366699,
      "learning_rate": 6.416320504546997e-05,
      "loss": 0.043,
      "step": 47
    },
    {
      "epoch": 8.695652173913043,
      "grad_norm": 0.09984829276800156,
      "learning_rate": 5.9326335692419995e-05,
      "loss": 0.0328,
      "step": 48
    },
    {
      "epoch": 8.869565217391305,
      "grad_norm": 1.290177345275879,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 0.0382,
      "step": 49
    },
    {
      "epoch": 9.08695652173913,
      "grad_norm": 0.8741787672042847,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.0487,
      "step": 50
    },
    {
      "epoch": 9.26086956521739,
      "grad_norm": 1.1984590291976929,
      "learning_rate": 4.5536096498497295e-05,
      "loss": 0.0382,
      "step": 51
    },
    {
      "epoch": 9.434782608695652,
      "grad_norm": 0.1054629385471344,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.0275,
      "step": 52
    },
    {
      "epoch": 9.608695652173914,
      "grad_norm": 0.06806695461273193,
      "learning_rate": 3.7067960895016275e-05,
      "loss": 0.0314,
      "step": 53
    },
    {
      "epoch": 9.782608695652174,
      "grad_norm": 0.29768458008766174,
      "learning_rate": 3.308693936411421e-05,
      "loss": 0.039,
      "step": 54
    },
    {
      "epoch": 9.956521739130435,
      "grad_norm": 1.4097405672073364,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.0516,
      "step": 55
    },
    {
      "epoch": 10.173913043478262,
      "grad_norm": 0.31604883074760437,
      "learning_rate": 2.5685517452260567e-05,
      "loss": 0.0375,
      "step": 56
    },
    {
      "epoch": 10.347826086956522,
      "grad_norm": 0.14812345802783966,
      "learning_rate": 2.2285403854302912e-05,
      "loss": 0.0296,
      "step": 57
    },
    {
      "epoch": 10.521739130434783,
      "grad_norm": 0.1633218377828598,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.0437,
      "step": 58
    },
    {
      "epoch": 10.695652173913043,
      "grad_norm": 0.1193070039153099,
      "learning_rate": 1.6132943205457606e-05,
      "loss": 0.0298,
      "step": 59
    },
    {
      "epoch": 10.869565217391305,
      "grad_norm": 0.1309748888015747,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.0345,
      "step": 60
    },
    {
      "epoch": 11.08695652173913,
      "grad_norm": 0.1206737831234932,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 0.0327,
      "step": 61
    },
    {
      "epoch": 11.26086956521739,
      "grad_norm": 0.07418113946914673,
      "learning_rate": 8.645454235739903e-06,
      "loss": 0.0309,
      "step": 62
    },
    {
      "epoch": 11.434782608695652,
      "grad_norm": 0.24486000835895538,
      "learning_rate": 6.6419573502798374e-06,
      "loss": 0.0347,
      "step": 63
    },
    {
      "epoch": 11.608695652173914,
      "grad_norm": 0.2284393310546875,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.0252,
      "step": 64
    },
    {
      "epoch": 11.782608695652174,
      "grad_norm": 0.19318842887878418,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.0377,
      "step": 65
    },
    {
      "epoch": 11.956521739130435,
      "grad_norm": 0.1955549567937851,
      "learning_rate": 2.1852399266194314e-06,
      "loss": 0.0564,
      "step": 66
    },
    {
      "epoch": 12.173913043478262,
      "grad_norm": 0.14949171245098114,
      "learning_rate": 1.231165940486234e-06,
      "loss": 0.0364,
      "step": 67
    },
    {
      "epoch": 12.347826086956522,
      "grad_norm": 0.32537975907325745,
      "learning_rate": 5.478104631726711e-07,
      "loss": 0.0402,
      "step": 68
    },
    {
      "epoch": 12.521739130434783,
      "grad_norm": 0.08832496404647827,
      "learning_rate": 1.3704652454261668e-07,
      "loss": 0.0289,
      "step": 69
    },
    {
      "epoch": 12.695652173913043,
      "grad_norm": 0.10512495785951614,
      "learning_rate": 0.0,
      "loss": 0.0324,
      "step": 70
    }
  ],
  "logging_steps": 1,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 14,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 144357206261760.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}