{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 12.695652173913043,
  "eval_steps": 500,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 10.28883171081543,
      "learning_rate": 2e-05,
      "loss": 2.6361,
      "step": 1
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 7.904043197631836,
      "learning_rate": 4e-05,
      "loss": 2.7168,
      "step": 2
    },
    {
      "epoch": 0.5217391304347826,
      "grad_norm": 7.175183296203613,
      "learning_rate": 6e-05,
      "loss": 2.5857,
      "step": 3
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 10.86050033569336,
      "learning_rate": 8e-05,
      "loss": 2.6506,
      "step": 4
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 8.235480308532715,
      "learning_rate": 0.0001,
      "loss": 2.4973,
      "step": 5
    },
    {
      "epoch": 1.0869565217391304,
      "grad_norm": 8.710506439208984,
      "learning_rate": 0.00012,
      "loss": 3.7313,
      "step": 6
    },
    {
      "epoch": 1.2608695652173914,
      "grad_norm": 4.005091667175293,
      "learning_rate": 0.00014,
      "loss": 1.9344,
      "step": 7
    },
    {
      "epoch": 1.434782608695652,
      "grad_norm": 1.7361865043640137,
      "learning_rate": 0.00016,
      "loss": 1.6744,
      "step": 8
    },
    {
      "epoch": 1.608695652173913,
      "grad_norm": 1.129019021987915,
      "learning_rate": 0.00018,
      "loss": 1.2627,
      "step": 9
    },
    {
      "epoch": 1.7826086956521738,
      "grad_norm": 2.075225830078125,
      "learning_rate": 0.0002,
      "loss": 1.1692,
      "step": 10
    },
    {
      "epoch": 1.9565217391304348,
      "grad_norm": 3.220959424972534,
      "learning_rate": 0.0001998629534754574,
      "loss": 1.3969,
      "step": 11
    },
    {
      "epoch": 2.1739130434782608,
      "grad_norm": 2.02755069732666,
      "learning_rate": 0.00019945218953682734,
      "loss": 0.6773,
      "step": 12
    },
    {
      "epoch": 2.3478260869565215,
      "grad_norm": 2.5127182006835938,
      "learning_rate": 0.00019876883405951377,
      "loss": 0.5633,
      "step": 13
    },
    {
      "epoch": 2.5217391304347827,
      "grad_norm": 1.7217646837234497,
      "learning_rate": 0.00019781476007338058,
      "loss": 0.451,
      "step": 14
    },
    {
      "epoch": 2.6956521739130435,
      "grad_norm": 0.879663348197937,
      "learning_rate": 0.00019659258262890683,
      "loss": 0.3685,
      "step": 15
    },
    {
      "epoch": 2.869565217391304,
      "grad_norm": 0.5109443664550781,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.3015,
      "step": 16
    },
    {
      "epoch": 3.0869565217391304,
      "grad_norm": 0.9060106873512268,
      "learning_rate": 0.00019335804264972018,
      "loss": 0.4393,
      "step": 17
    },
    {
      "epoch": 3.260869565217391,
      "grad_norm": 0.3581462800502777,
      "learning_rate": 0.0001913545457642601,
      "loss": 0.1748,
      "step": 18
    },
    {
      "epoch": 3.4347826086956523,
      "grad_norm": 0.3196995258331299,
      "learning_rate": 0.0001891006524188368,
      "loss": 0.1477,
      "step": 19
    },
    {
      "epoch": 3.608695652173913,
      "grad_norm": 0.5006406307220459,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.1849,
      "step": 20
    },
    {
      "epoch": 3.782608695652174,
      "grad_norm": 0.6925095319747925,
      "learning_rate": 0.00018386705679454242,
      "loss": 0.1369,
      "step": 21
    },
    {
      "epoch": 3.9565217391304346,
      "grad_norm": 0.9922828674316406,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.1964,
      "step": 22
    },
    {
      "epoch": 4.173913043478261,
      "grad_norm": 0.19768103957176208,
      "learning_rate": 0.0001777145961456971,
      "loss": 0.0935,
      "step": 23
    },
    {
      "epoch": 4.3478260869565215,
      "grad_norm": 0.357398122549057,
      "learning_rate": 0.00017431448254773944,
      "loss": 0.0992,
      "step": 24
    },
    {
      "epoch": 4.521739130434782,
      "grad_norm": 0.223428413271904,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.086,
      "step": 25
    },
    {
      "epoch": 4.695652173913043,
      "grad_norm": 0.169768288731575,
      "learning_rate": 0.00016691306063588583,
      "loss": 0.0732,
      "step": 26
    },
    {
      "epoch": 4.869565217391305,
      "grad_norm": 0.4857354164123535,
      "learning_rate": 0.00016293203910498376,
      "loss": 0.0819,
      "step": 27
    },
    {
      "epoch": 5.086956521739131,
      "grad_norm": 0.7584252953529358,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.1377,
      "step": 28
    },
    {
      "epoch": 5.260869565217392,
      "grad_norm": 0.301729291677475,
      "learning_rate": 0.00015446390350150273,
      "loss": 0.0559,
      "step": 29
    },
    {
      "epoch": 5.434782608695652,
      "grad_norm": 0.25065937638282776,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.0744,
      "step": 30
    },
    {
      "epoch": 5.608695652173913,
      "grad_norm": 0.24226656556129456,
      "learning_rate": 0.00014539904997395468,
      "loss": 0.0687,
      "step": 31
    },
    {
      "epoch": 5.782608695652174,
      "grad_norm": 0.2135501354932785,
      "learning_rate": 0.00014067366430758004,
      "loss": 0.0574,
      "step": 32
    },
    {
      "epoch": 5.956521739130435,
      "grad_norm": 0.2674519121646881,
      "learning_rate": 0.00013583679495453,
      "loss": 0.0868,
      "step": 33
    },
    {
      "epoch": 6.173913043478261,
      "grad_norm": 0.22415511310100555,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.0557,
      "step": 34
    },
    {
      "epoch": 6.3478260869565215,
      "grad_norm": 0.16434131562709808,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.0475,
      "step": 35
    },
    {
      "epoch": 6.521739130434782,
      "grad_norm": 0.15994659066200256,
      "learning_rate": 0.00012079116908177593,
      "loss": 0.0466,
      "step": 36
    },
    {
      "epoch": 6.695652173913043,
      "grad_norm": 0.1557261049747467,
      "learning_rate": 0.0001156434465040231,
      "loss": 0.0571,
      "step": 37
    },
    {
      "epoch": 6.869565217391305,
      "grad_norm": 0.14265908300876617,
      "learning_rate": 0.00011045284632676536,
      "loss": 0.0495,
      "step": 38
    },
    {
      "epoch": 7.086956521739131,
      "grad_norm": 0.15171098709106445,
      "learning_rate": 0.0001052335956242944,
      "loss": 0.0531,
      "step": 39
    },
    {
      "epoch": 7.260869565217392,
      "grad_norm": 0.11054924130439758,
      "learning_rate": 0.0001,
      "loss": 0.0422,
      "step": 40
    },
    {
      "epoch": 7.434782608695652,
      "grad_norm": 0.13466708362102509,
      "learning_rate": 9.476640437570562e-05,
      "loss": 0.0428,
      "step": 41
    },
    {
      "epoch": 7.608695652173913,
      "grad_norm": 0.6286543607711792,
      "learning_rate": 8.954715367323468e-05,
      "loss": 0.0521,
      "step": 42
    },
    {
      "epoch": 7.782608695652174,
      "grad_norm": 0.08527137339115143,
      "learning_rate": 8.435655349597689e-05,
      "loss": 0.0284,
      "step": 43
    },
    {
      "epoch": 7.956521739130435,
      "grad_norm": 0.1831391453742981,
      "learning_rate": 7.920883091822408e-05,
      "loss": 0.0535,
      "step": 44
    },
    {
      "epoch": 8.173913043478262,
      "grad_norm": 0.25190696120262146,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.0423,
      "step": 45
    },
    {
      "epoch": 8.347826086956522,
      "grad_norm": 0.12309681624174118,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.042,
      "step": 46
    },
    {
      "epoch": 8.521739130434783,
      "grad_norm": 0.22868376970291138,
      "learning_rate": 6.416320504546997e-05,
      "loss": 0.045,
      "step": 47
    },
    {
      "epoch": 8.695652173913043,
      "grad_norm": 0.07320209592580795,
      "learning_rate": 5.9326335692419995e-05,
      "loss": 0.0326,
      "step": 48
    },
    {
      "epoch": 8.869565217391305,
      "grad_norm": 0.35984838008880615,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 0.0363,
      "step": 49
    },
    {
      "epoch": 9.08695652173913,
      "grad_norm": 0.1674010455608368,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.0511,
      "step": 50
    },
    {
      "epoch": 9.26086956521739,
      "grad_norm": 0.08465681225061417,
      "learning_rate": 4.5536096498497295e-05,
      "loss": 0.0351,
      "step": 51
    },
    {
      "epoch": 9.434782608695652,
      "grad_norm": 0.10184628516435623,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.0262,
      "step": 52
    },
    {
      "epoch": 9.608695652173914,
      "grad_norm": 0.062240391969680786,
      "learning_rate": 3.7067960895016275e-05,
      "loss": 0.0304,
      "step": 53
    },
    {
      "epoch": 9.782608695652174,
      "grad_norm": 0.09094466269016266,
      "learning_rate": 3.308693936411421e-05,
      "loss": 0.0385,
      "step": 54
    },
    {
      "epoch": 9.956521739130435,
      "grad_norm": 0.27231261134147644,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.0544,
      "step": 55
    },
    {
      "epoch": 10.173913043478262,
      "grad_norm": 0.07962878793478012,
      "learning_rate": 2.5685517452260567e-05,
      "loss": 0.0401,
      "step": 56
    },
    {
      "epoch": 10.347826086956522,
      "grad_norm": 0.12017780542373657,
      "learning_rate": 2.2285403854302912e-05,
      "loss": 0.0316,
      "step": 57
    },
    {
      "epoch": 10.521739130434783,
      "grad_norm": 0.09102747589349747,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.0418,
      "step": 58
    },
    {
      "epoch": 10.695652173913043,
      "grad_norm": 0.0645853728055954,
      "learning_rate": 1.6132943205457606e-05,
      "loss": 0.0309,
      "step": 59
    },
    {
      "epoch": 10.869565217391305,
      "grad_norm": 0.08740692585706711,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.0333,
      "step": 60
    },
    {
      "epoch": 11.08695652173913,
      "grad_norm": 0.11425518244504929,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 0.0392,
      "step": 61
    },
    {
      "epoch": 11.26086956521739,
      "grad_norm": 0.09765691310167313,
      "learning_rate": 8.645454235739903e-06,
      "loss": 0.0344,
      "step": 62
    },
    {
      "epoch": 11.434782608695652,
      "grad_norm": 0.06935255229473114,
      "learning_rate": 6.6419573502798374e-06,
      "loss": 0.0327,
      "step": 63
    },
    {
      "epoch": 11.608695652173914,
      "grad_norm": 0.06399647146463394,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.0281,
      "step": 64
    },
    {
      "epoch": 11.782608695652174,
      "grad_norm": 0.18286477029323578,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.0373,
      "step": 65
    },
    {
      "epoch": 11.956521739130435,
      "grad_norm": 0.154697448015213,
      "learning_rate": 2.1852399266194314e-06,
      "loss": 0.0547,
      "step": 66
    },
    {
      "epoch": 12.173913043478262,
      "grad_norm": 0.22570864856243134,
      "learning_rate": 1.231165940486234e-06,
      "loss": 0.0328,
      "step": 67
    },
    {
      "epoch": 12.347826086956522,
      "grad_norm": 0.08155275881290436,
      "learning_rate": 5.478104631726711e-07,
      "loss": 0.0388,
      "step": 68
    },
    {
      "epoch": 12.521739130434783,
      "grad_norm": 0.07228947430849075,
      "learning_rate": 1.3704652454261668e-07,
      "loss": 0.0272,
      "step": 69
    },
    {
      "epoch": 12.695652173913043,
      "grad_norm": 0.0742647722363472,
      "learning_rate": 0.0,
      "loss": 0.0308,
      "step": 70
    }
  ],
  "logging_steps": 1,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 14,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 144357206261760.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}