{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 77,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012987012987012988,
      "grad_norm": 30.513944625854492,
      "learning_rate": 5.0000000000000004e-08,
      "loss": 4.1013,
      "step": 1
    },
    {
      "epoch": 0.025974025974025976,
      "grad_norm": 30.965682983398438,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 4.1757,
      "step": 2
    },
    {
      "epoch": 0.03896103896103896,
      "grad_norm": 30.834516525268555,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 4.1371,
      "step": 3
    },
    {
      "epoch": 0.05194805194805195,
      "grad_norm": 30.98870849609375,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 4.0543,
      "step": 4
    },
    {
      "epoch": 0.06493506493506493,
      "grad_norm": 30.404874801635742,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 4.09,
      "step": 5
    },
    {
      "epoch": 0.07792207792207792,
      "grad_norm": 31.433774948120117,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 4.1777,
      "step": 6
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 30.850692749023438,
      "learning_rate": 3.5000000000000004e-07,
      "loss": 4.142,
      "step": 7
    },
    {
      "epoch": 0.1038961038961039,
      "grad_norm": 31.00360870361328,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 4.1115,
      "step": 8
    },
    {
      "epoch": 0.11688311688311688,
      "grad_norm": 31.221616744995117,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 4.1585,
      "step": 9
    },
    {
      "epoch": 0.12987012987012986,
      "grad_norm": 30.560623168945312,
      "learning_rate": 5.000000000000001e-07,
      "loss": 4.1155,
      "step": 10
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 30.089527130126953,
      "learning_rate": 5.5e-07,
      "loss": 4.0638,
      "step": 11
    },
    {
      "epoch": 0.15584415584415584,
      "grad_norm": 30.662569046020508,
      "learning_rate": 6.000000000000001e-07,
      "loss": 4.0152,
      "step": 12
    },
    {
      "epoch": 0.16883116883116883,
      "grad_norm": 29.455503463745117,
      "learning_rate": 6.5e-07,
      "loss": 3.9501,
      "step": 13
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 28.851863861083984,
      "learning_rate": 7.000000000000001e-07,
      "loss": 3.9066,
      "step": 14
    },
    {
      "epoch": 0.19480519480519481,
      "grad_norm": 27.41941261291504,
      "learning_rate": 7.5e-07,
      "loss": 3.816,
      "step": 15
    },
    {
      "epoch": 0.2077922077922078,
      "grad_norm": 25.754905700683594,
      "learning_rate": 8.000000000000001e-07,
      "loss": 3.6715,
      "step": 16
    },
    {
      "epoch": 0.22077922077922077,
      "grad_norm": 24.467802047729492,
      "learning_rate": 8.500000000000001e-07,
      "loss": 3.6226,
      "step": 17
    },
    {
      "epoch": 0.23376623376623376,
      "grad_norm": 23.671884536743164,
      "learning_rate": 9.000000000000001e-07,
      "loss": 3.5756,
      "step": 18
    },
    {
      "epoch": 0.24675324675324675,
      "grad_norm": 21.28356170654297,
      "learning_rate": 9.500000000000001e-07,
      "loss": 3.3822,
      "step": 19
    },
    {
      "epoch": 0.2597402597402597,
      "grad_norm": 20.54037857055664,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 3.3799,
      "step": 20
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 18.18222427368164,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 3.1964,
      "step": 21
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 17.782703399658203,
      "learning_rate": 1.1e-06,
      "loss": 3.1745,
      "step": 22
    },
    {
      "epoch": 0.2987012987012987,
      "grad_norm": 15.965900421142578,
      "learning_rate": 1.1500000000000002e-06,
      "loss": 2.9002,
      "step": 23
    },
    {
      "epoch": 0.3116883116883117,
      "grad_norm": 16.00725746154785,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 2.9123,
      "step": 24
    },
    {
      "epoch": 0.3246753246753247,
      "grad_norm": 15.301984786987305,
      "learning_rate": 1.25e-06,
      "loss": 2.6531,
      "step": 25
    },
    {
      "epoch": 0.33766233766233766,
      "grad_norm": 16.359207153320312,
      "learning_rate": 1.3e-06,
      "loss": 2.6089,
      "step": 26
    },
    {
      "epoch": 0.35064935064935066,
      "grad_norm": 17.74176025390625,
      "learning_rate": 1.3500000000000002e-06,
      "loss": 2.4805,
      "step": 27
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 18.702547073364258,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 2.3312,
      "step": 28
    },
    {
      "epoch": 0.37662337662337664,
      "grad_norm": 18.44041633605957,
      "learning_rate": 1.45e-06,
      "loss": 2.2273,
      "step": 29
    },
    {
      "epoch": 0.38961038961038963,
      "grad_norm": 15.867247581481934,
      "learning_rate": 1.5e-06,
      "loss": 2.0514,
      "step": 30
    },
    {
      "epoch": 0.4025974025974026,
      "grad_norm": 13.97642707824707,
      "learning_rate": 1.5500000000000002e-06,
      "loss": 1.8689,
      "step": 31
    },
    {
      "epoch": 0.4155844155844156,
      "grad_norm": 13.599323272705078,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 1.7907,
      "step": 32
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 13.204086303710938,
      "learning_rate": 1.6500000000000003e-06,
      "loss": 1.6537,
      "step": 33
    },
    {
      "epoch": 0.44155844155844154,
      "grad_norm": 13.255966186523438,
      "learning_rate": 1.7000000000000002e-06,
      "loss": 1.5693,
      "step": 34
    },
    {
      "epoch": 0.45454545454545453,
      "grad_norm": 13.00086784362793,
      "learning_rate": 1.75e-06,
      "loss": 1.3916,
      "step": 35
    },
    {
      "epoch": 0.4675324675324675,
      "grad_norm": 12.996549606323242,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 1.2734,
      "step": 36
    },
    {
      "epoch": 0.4805194805194805,
      "grad_norm": 12.506110191345215,
      "learning_rate": 1.85e-06,
      "loss": 1.1444,
      "step": 37
    },
    {
      "epoch": 0.4935064935064935,
      "grad_norm": 12.379636764526367,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 1.033,
      "step": 38
    },
    {
      "epoch": 0.5064935064935064,
      "grad_norm": 12.38772201538086,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 0.8932,
      "step": 39
    },
    {
      "epoch": 0.5194805194805194,
      "grad_norm": 12.370598793029785,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.7914,
      "step": 40
    },
    {
      "epoch": 0.5324675324675324,
      "grad_norm": 11.510255813598633,
      "learning_rate": 2.05e-06,
      "loss": 0.6336,
      "step": 41
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 10.088979721069336,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.5115,
      "step": 42
    },
    {
      "epoch": 0.5584415584415584,
      "grad_norm": 9.241965293884277,
      "learning_rate": 2.15e-06,
      "loss": 0.416,
      "step": 43
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 7.457204341888428,
      "learning_rate": 2.2e-06,
      "loss": 0.2947,
      "step": 44
    },
    {
      "epoch": 0.5844155844155844,
      "grad_norm": 6.164954662322998,
      "learning_rate": 2.25e-06,
      "loss": 0.2202,
      "step": 45
    },
    {
      "epoch": 0.5974025974025974,
      "grad_norm": 4.587556838989258,
      "learning_rate": 2.3000000000000004e-06,
      "loss": 0.1721,
      "step": 46
    },
    {
      "epoch": 0.6103896103896104,
      "grad_norm": 3.2366952896118164,
      "learning_rate": 2.35e-06,
      "loss": 0.1451,
      "step": 47
    },
    {
      "epoch": 0.6233766233766234,
      "grad_norm": 2.397469997406006,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.1116,
      "step": 48
    },
    {
      "epoch": 0.6363636363636364,
      "grad_norm": 1.872839093208313,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 0.1,
      "step": 49
    },
    {
      "epoch": 0.6493506493506493,
      "grad_norm": 1.2464176416397095,
      "learning_rate": 2.5e-06,
      "loss": 0.1024,
      "step": 50
    },
    {
      "epoch": 0.6623376623376623,
      "grad_norm": 1.0786665678024292,
      "learning_rate": 2.55e-06,
      "loss": 0.087,
      "step": 51
    },
    {
      "epoch": 0.6753246753246753,
      "grad_norm": 1.10042142868042,
      "learning_rate": 2.6e-06,
      "loss": 0.0838,
      "step": 52
    },
    {
      "epoch": 0.6883116883116883,
      "grad_norm": 0.9031299948692322,
      "learning_rate": 2.6500000000000005e-06,
      "loss": 0.0804,
      "step": 53
    },
    {
      "epoch": 0.7012987012987013,
      "grad_norm": 0.8072167634963989,
      "learning_rate": 2.7000000000000004e-06,
      "loss": 0.0786,
      "step": 54
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.6948329210281372,
      "learning_rate": 2.7500000000000004e-06,
      "loss": 0.0773,
      "step": 55
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.6062957644462585,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.0756,
      "step": 56
    },
    {
      "epoch": 0.7402597402597403,
      "grad_norm": 0.4804394841194153,
      "learning_rate": 2.85e-06,
      "loss": 0.0697,
      "step": 57
    },
    {
      "epoch": 0.7532467532467533,
      "grad_norm": 0.5353919863700867,
      "learning_rate": 2.9e-06,
      "loss": 0.0697,
      "step": 58
    },
    {
      "epoch": 0.7662337662337663,
      "grad_norm": 0.4344547390937805,
      "learning_rate": 2.95e-06,
      "loss": 0.0704,
      "step": 59
    },
    {
      "epoch": 0.7792207792207793,
      "grad_norm": 0.574044942855835,
      "learning_rate": 3e-06,
      "loss": 0.0778,
      "step": 60
    },
    {
      "epoch": 0.7922077922077922,
      "grad_norm": 0.5392284393310547,
      "learning_rate": 3.05e-06,
      "loss": 0.0658,
      "step": 61
    },
    {
      "epoch": 0.8051948051948052,
      "grad_norm": 0.5098194479942322,
      "learning_rate": 3.1000000000000004e-06,
      "loss": 0.0661,
      "step": 62
    },
    {
      "epoch": 0.8181818181818182,
      "grad_norm": 0.3733481168746948,
      "learning_rate": 3.1500000000000003e-06,
      "loss": 0.0667,
      "step": 63
    },
    {
      "epoch": 0.8311688311688312,
      "grad_norm": 0.3714415431022644,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.0635,
      "step": 64
    },
    {
      "epoch": 0.8441558441558441,
      "grad_norm": 0.37473437190055847,
      "learning_rate": 3.2500000000000002e-06,
      "loss": 0.0661,
      "step": 65
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.35993579030036926,
      "learning_rate": 3.3000000000000006e-06,
      "loss": 0.0637,
      "step": 66
    },
    {
      "epoch": 0.8701298701298701,
      "grad_norm": 0.41175705194473267,
      "learning_rate": 3.3500000000000005e-06,
      "loss": 0.0646,
      "step": 67
    },
    {
      "epoch": 0.8831168831168831,
      "grad_norm": 0.29564622044563293,
      "learning_rate": 3.4000000000000005e-06,
      "loss": 0.0627,
      "step": 68
    },
    {
      "epoch": 0.8961038961038961,
      "grad_norm": 0.3018590211868286,
      "learning_rate": 3.45e-06,
      "loss": 0.0613,
      "step": 69
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 0.499183714389801,
      "learning_rate": 3.5e-06,
      "loss": 0.0664,
      "step": 70
    },
    {
      "epoch": 0.922077922077922,
      "grad_norm": 0.3450847864151001,
      "learning_rate": 3.5500000000000003e-06,
      "loss": 0.0589,
      "step": 71
    },
    {
      "epoch": 0.935064935064935,
      "grad_norm": 0.26693543791770935,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.0633,
      "step": 72
    },
    {
      "epoch": 0.948051948051948,
      "grad_norm": 0.2867635190486908,
      "learning_rate": 3.65e-06,
      "loss": 0.062,
      "step": 73
    },
    {
      "epoch": 0.961038961038961,
      "grad_norm": 0.3328873813152313,
      "learning_rate": 3.7e-06,
      "loss": 0.0587,
      "step": 74
    },
    {
      "epoch": 0.974025974025974,
      "grad_norm": 0.33611243963241577,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.058,
      "step": 75
    },
    {
      "epoch": 0.987012987012987,
      "grad_norm": 0.2983446419239044,
      "learning_rate": 3.8000000000000005e-06,
      "loss": 0.0632,
      "step": 76
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.3246895372867584,
      "learning_rate": 3.85e-06,
      "loss": 0.0583,
      "step": 77
    }
  ],
  "logging_steps": 1,
  "max_steps": 462,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 77,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.721176540965765e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}