{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 232,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008620689655172414,
"grad_norm": 30.156124114990234,
"learning_rate": 5.0000000000000004e-08,
"loss": 3.946,
"step": 1
},
{
"epoch": 0.017241379310344827,
"grad_norm": 29.95058250427246,
"learning_rate": 1.0000000000000001e-07,
"loss": 4.0398,
"step": 2
},
{
"epoch": 0.02586206896551724,
"grad_norm": 29.866586685180664,
"learning_rate": 1.5000000000000002e-07,
"loss": 3.9303,
"step": 3
},
{
"epoch": 0.034482758620689655,
"grad_norm": 30.012300491333008,
"learning_rate": 2.0000000000000002e-07,
"loss": 3.9637,
"step": 4
},
{
"epoch": 0.04310344827586207,
"grad_norm": 30.934751510620117,
"learning_rate": 2.5000000000000004e-07,
"loss": 4.0617,
"step": 5
},
{
"epoch": 0.05172413793103448,
"grad_norm": 30.031415939331055,
"learning_rate": 3.0000000000000004e-07,
"loss": 3.9659,
"step": 6
},
{
"epoch": 0.0603448275862069,
"grad_norm": 29.62813949584961,
"learning_rate": 3.5000000000000004e-07,
"loss": 3.9978,
"step": 7
},
{
"epoch": 0.06896551724137931,
"grad_norm": 31.0523681640625,
"learning_rate": 4.0000000000000003e-07,
"loss": 4.0237,
"step": 8
},
{
"epoch": 0.07758620689655173,
"grad_norm": 28.83756446838379,
"learning_rate": 4.5000000000000003e-07,
"loss": 3.8214,
"step": 9
},
{
"epoch": 0.08620689655172414,
"grad_norm": 28.1810359954834,
"learning_rate": 5.000000000000001e-07,
"loss": 3.848,
"step": 10
},
{
"epoch": 0.09482758620689655,
"grad_norm": 28.367570877075195,
"learning_rate": 5.5e-07,
"loss": 3.7026,
"step": 11
},
{
"epoch": 0.10344827586206896,
"grad_norm": 27.66380500793457,
"learning_rate": 6.000000000000001e-07,
"loss": 3.6833,
"step": 12
},
{
"epoch": 0.11206896551724138,
"grad_norm": 27.496694564819336,
"learning_rate": 6.5e-07,
"loss": 3.762,
"step": 13
},
{
"epoch": 0.1206896551724138,
"grad_norm": 28.319055557250977,
"learning_rate": 7.000000000000001e-07,
"loss": 3.8867,
"step": 14
},
{
"epoch": 0.12931034482758622,
"grad_norm": 26.112581253051758,
"learning_rate": 7.5e-07,
"loss": 3.6278,
"step": 15
},
{
"epoch": 0.13793103448275862,
"grad_norm": 25.8245792388916,
"learning_rate": 8.000000000000001e-07,
"loss": 3.7957,
"step": 16
},
{
"epoch": 0.14655172413793102,
"grad_norm": 23.66245460510254,
"learning_rate": 8.500000000000001e-07,
"loss": 3.4384,
"step": 17
},
{
"epoch": 0.15517241379310345,
"grad_norm": 21.69405746459961,
"learning_rate": 9.000000000000001e-07,
"loss": 3.2602,
"step": 18
},
{
"epoch": 0.16379310344827586,
"grad_norm": 20.691402435302734,
"learning_rate": 9.500000000000001e-07,
"loss": 3.2486,
"step": 19
},
{
"epoch": 0.1724137931034483,
"grad_norm": 20.67167854309082,
"learning_rate": 1.0000000000000002e-06,
"loss": 3.2401,
"step": 20
},
{
"epoch": 0.1810344827586207,
"grad_norm": 19.054428100585938,
"learning_rate": 1.0500000000000001e-06,
"loss": 3.0375,
"step": 21
},
{
"epoch": 0.1896551724137931,
"grad_norm": 18.890884399414062,
"learning_rate": 1.1e-06,
"loss": 2.9677,
"step": 22
},
{
"epoch": 0.19827586206896552,
"grad_norm": 18.61600112915039,
"learning_rate": 1.1500000000000002e-06,
"loss": 2.8387,
"step": 23
},
{
"epoch": 0.20689655172413793,
"grad_norm": 16.910585403442383,
"learning_rate": 1.2000000000000002e-06,
"loss": 2.4914,
"step": 24
},
{
"epoch": 0.21551724137931033,
"grad_norm": 17.708385467529297,
"learning_rate": 1.25e-06,
"loss": 2.5361,
"step": 25
},
{
"epoch": 0.22413793103448276,
"grad_norm": 17.07745933532715,
"learning_rate": 1.3e-06,
"loss": 2.4098,
"step": 26
},
{
"epoch": 0.23275862068965517,
"grad_norm": 16.244144439697266,
"learning_rate": 1.3500000000000002e-06,
"loss": 2.2155,
"step": 27
},
{
"epoch": 0.2413793103448276,
"grad_norm": 16.11887550354004,
"learning_rate": 1.4000000000000001e-06,
"loss": 2.0036,
"step": 28
},
{
"epoch": 0.25,
"grad_norm": 16.034631729125977,
"learning_rate": 1.45e-06,
"loss": 1.8617,
"step": 29
},
{
"epoch": 0.25862068965517243,
"grad_norm": 15.437153816223145,
"learning_rate": 1.5e-06,
"loss": 1.7413,
"step": 30
},
{
"epoch": 0.2672413793103448,
"grad_norm": 14.188116073608398,
"learning_rate": 1.5500000000000002e-06,
"loss": 1.5002,
"step": 31
},
{
"epoch": 0.27586206896551724,
"grad_norm": 14.200998306274414,
"learning_rate": 1.6000000000000001e-06,
"loss": 1.4469,
"step": 32
},
{
"epoch": 0.28448275862068967,
"grad_norm": 13.209551811218262,
"learning_rate": 1.6500000000000003e-06,
"loss": 1.3016,
"step": 33
},
{
"epoch": 0.29310344827586204,
"grad_norm": 12.631085395812988,
"learning_rate": 1.7000000000000002e-06,
"loss": 1.1522,
"step": 34
},
{
"epoch": 0.3017241379310345,
"grad_norm": 12.504134178161621,
"learning_rate": 1.75e-06,
"loss": 1.0058,
"step": 35
},
{
"epoch": 0.3103448275862069,
"grad_norm": 12.599784851074219,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.921,
"step": 36
},
{
"epoch": 0.31896551724137934,
"grad_norm": 11.577665328979492,
"learning_rate": 1.85e-06,
"loss": 0.8099,
"step": 37
},
{
"epoch": 0.3275862068965517,
"grad_norm": 10.465872764587402,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.6565,
"step": 38
},
{
"epoch": 0.33620689655172414,
"grad_norm": 10.161813735961914,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.5712,
"step": 39
},
{
"epoch": 0.3448275862068966,
"grad_norm": 8.384145736694336,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.4265,
"step": 40
},
{
"epoch": 0.35344827586206895,
"grad_norm": 7.072062015533447,
"learning_rate": 2.05e-06,
"loss": 0.409,
"step": 41
},
{
"epoch": 0.3620689655172414,
"grad_norm": 6.072140693664551,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.3263,
"step": 42
},
{
"epoch": 0.3706896551724138,
"grad_norm": 6.31119441986084,
"learning_rate": 2.15e-06,
"loss": 0.258,
"step": 43
},
{
"epoch": 0.3793103448275862,
"grad_norm": 6.619389057159424,
"learning_rate": 2.2e-06,
"loss": 0.2454,
"step": 44
},
{
"epoch": 0.3879310344827586,
"grad_norm": 6.546375751495361,
"learning_rate": 2.25e-06,
"loss": 0.2314,
"step": 45
},
{
"epoch": 0.39655172413793105,
"grad_norm": 5.219631671905518,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.2006,
"step": 46
},
{
"epoch": 0.4051724137931034,
"grad_norm": 3.1164474487304688,
"learning_rate": 2.35e-06,
"loss": 0.1677,
"step": 47
},
{
"epoch": 0.41379310344827586,
"grad_norm": 1.872147798538208,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.1339,
"step": 48
},
{
"epoch": 0.4224137931034483,
"grad_norm": 1.4775545597076416,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.1218,
"step": 49
},
{
"epoch": 0.43103448275862066,
"grad_norm": 1.2931559085845947,
"learning_rate": 2.5e-06,
"loss": 0.1151,
"step": 50
},
{
"epoch": 0.4396551724137931,
"grad_norm": 1.3222297430038452,
"learning_rate": 2.55e-06,
"loss": 0.1164,
"step": 51
},
{
"epoch": 0.4482758620689655,
"grad_norm": 1.393062710762024,
"learning_rate": 2.6e-06,
"loss": 0.1053,
"step": 52
},
{
"epoch": 0.45689655172413796,
"grad_norm": 1.0838805437088013,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.105,
"step": 53
},
{
"epoch": 0.46551724137931033,
"grad_norm": 0.954925537109375,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.0922,
"step": 54
},
{
"epoch": 0.47413793103448276,
"grad_norm": 0.7521713972091675,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0887,
"step": 55
},
{
"epoch": 0.4827586206896552,
"grad_norm": 0.7261010408401489,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0997,
"step": 56
},
{
"epoch": 0.49137931034482757,
"grad_norm": 0.6530802845954895,
"learning_rate": 2.85e-06,
"loss": 0.0882,
"step": 57
},
{
"epoch": 0.5,
"grad_norm": 0.6899245381355286,
"learning_rate": 2.9e-06,
"loss": 0.0844,
"step": 58
},
{
"epoch": 0.5086206896551724,
"grad_norm": 0.6771528124809265,
"learning_rate": 2.95e-06,
"loss": 0.0884,
"step": 59
},
{
"epoch": 0.5172413793103449,
"grad_norm": 0.6307985782623291,
"learning_rate": 3e-06,
"loss": 0.0912,
"step": 60
},
{
"epoch": 0.5258620689655172,
"grad_norm": 0.5869951844215393,
"learning_rate": 3.05e-06,
"loss": 0.0875,
"step": 61
},
{
"epoch": 0.5344827586206896,
"grad_norm": 0.6039404273033142,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.0768,
"step": 62
},
{
"epoch": 0.5431034482758621,
"grad_norm": 0.49209344387054443,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.0815,
"step": 63
},
{
"epoch": 0.5517241379310345,
"grad_norm": 0.7383344769477844,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.0874,
"step": 64
},
{
"epoch": 0.5603448275862069,
"grad_norm": 0.5552617311477661,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.0762,
"step": 65
},
{
"epoch": 0.5689655172413793,
"grad_norm": 0.4992441236972809,
"learning_rate": 3.3000000000000006e-06,
"loss": 0.0793,
"step": 66
},
{
"epoch": 0.5775862068965517,
"grad_norm": 0.4979636073112488,
"learning_rate": 3.3500000000000005e-06,
"loss": 0.0792,
"step": 67
},
{
"epoch": 0.5862068965517241,
"grad_norm": 0.4675934612751007,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.0766,
"step": 68
},
{
"epoch": 0.5948275862068966,
"grad_norm": 0.49495571851730347,
"learning_rate": 3.45e-06,
"loss": 0.0824,
"step": 69
},
{
"epoch": 0.603448275862069,
"grad_norm": 0.6044315695762634,
"learning_rate": 3.5e-06,
"loss": 0.0826,
"step": 70
},
{
"epoch": 0.6120689655172413,
"grad_norm": 0.4898519217967987,
"learning_rate": 3.5500000000000003e-06,
"loss": 0.0755,
"step": 71
},
{
"epoch": 0.6206896551724138,
"grad_norm": 0.4218939244747162,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0762,
"step": 72
},
{
"epoch": 0.6293103448275862,
"grad_norm": 0.5785802006721497,
"learning_rate": 3.65e-06,
"loss": 0.0837,
"step": 73
},
{
"epoch": 0.6379310344827587,
"grad_norm": 0.5505399703979492,
"learning_rate": 3.7e-06,
"loss": 0.0814,
"step": 74
},
{
"epoch": 0.646551724137931,
"grad_norm": 0.6062561869621277,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0725,
"step": 75
},
{
"epoch": 0.6551724137931034,
"grad_norm": 0.707350492477417,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.0913,
"step": 76
},
{
"epoch": 0.6637931034482759,
"grad_norm": 0.5091889500617981,
"learning_rate": 3.85e-06,
"loss": 0.0697,
"step": 77
},
{
"epoch": 0.6724137931034483,
"grad_norm": 0.4801473319530487,
"learning_rate": 3.900000000000001e-06,
"loss": 0.073,
"step": 78
},
{
"epoch": 0.6810344827586207,
"grad_norm": 0.462162047624588,
"learning_rate": 3.95e-06,
"loss": 0.0805,
"step": 79
},
{
"epoch": 0.6896551724137931,
"grad_norm": 0.463969349861145,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0738,
"step": 80
},
{
"epoch": 0.6982758620689655,
"grad_norm": 0.3980114161968231,
"learning_rate": 4.05e-06,
"loss": 0.0742,
"step": 81
},
{
"epoch": 0.7068965517241379,
"grad_norm": 0.3627180755138397,
"learning_rate": 4.1e-06,
"loss": 0.0683,
"step": 82
},
{
"epoch": 0.7155172413793104,
"grad_norm": 0.39726322889328003,
"learning_rate": 4.15e-06,
"loss": 0.0717,
"step": 83
},
{
"epoch": 0.7241379310344828,
"grad_norm": 0.48898085951805115,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.0676,
"step": 84
},
{
"epoch": 0.7327586206896551,
"grad_norm": 0.6143100261688232,
"learning_rate": 4.25e-06,
"loss": 0.0708,
"step": 85
},
{
"epoch": 0.7413793103448276,
"grad_norm": 0.5028995275497437,
"learning_rate": 4.3e-06,
"loss": 0.0713,
"step": 86
},
{
"epoch": 0.75,
"grad_norm": 0.5576066970825195,
"learning_rate": 4.350000000000001e-06,
"loss": 0.078,
"step": 87
},
{
"epoch": 0.7586206896551724,
"grad_norm": 0.37101301550865173,
"learning_rate": 4.4e-06,
"loss": 0.071,
"step": 88
},
{
"epoch": 0.7672413793103449,
"grad_norm": 0.442694753408432,
"learning_rate": 4.450000000000001e-06,
"loss": 0.0791,
"step": 89
},
{
"epoch": 0.7758620689655172,
"grad_norm": 0.48991039395332336,
"learning_rate": 4.5e-06,
"loss": 0.0681,
"step": 90
},
{
"epoch": 0.7844827586206896,
"grad_norm": 0.46367791295051575,
"learning_rate": 4.5500000000000005e-06,
"loss": 0.0701,
"step": 91
},
{
"epoch": 0.7931034482758621,
"grad_norm": 0.3454825282096863,
"learning_rate": 4.600000000000001e-06,
"loss": 0.0774,
"step": 92
},
{
"epoch": 0.8017241379310345,
"grad_norm": 0.35102447867393494,
"learning_rate": 4.65e-06,
"loss": 0.0659,
"step": 93
},
{
"epoch": 0.8103448275862069,
"grad_norm": 0.3791246712207794,
"learning_rate": 4.7e-06,
"loss": 0.0727,
"step": 94
},
{
"epoch": 0.8189655172413793,
"grad_norm": 0.3911365270614624,
"learning_rate": 4.75e-06,
"loss": 0.0641,
"step": 95
},
{
"epoch": 0.8275862068965517,
"grad_norm": 0.35395047068595886,
"learning_rate": 4.800000000000001e-06,
"loss": 0.0735,
"step": 96
},
{
"epoch": 0.8362068965517241,
"grad_norm": 0.3499661087989807,
"learning_rate": 4.85e-06,
"loss": 0.0657,
"step": 97
},
{
"epoch": 0.8448275862068966,
"grad_norm": 0.34877678751945496,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.0701,
"step": 98
},
{
"epoch": 0.853448275862069,
"grad_norm": 0.36436647176742554,
"learning_rate": 4.95e-06,
"loss": 0.0758,
"step": 99
},
{
"epoch": 0.8620689655172413,
"grad_norm": 0.4726298749446869,
"learning_rate": 5e-06,
"loss": 0.0731,
"step": 100
},
{
"epoch": 0.8706896551724138,
"grad_norm": 0.31892526149749756,
"learning_rate": 4.999965269084342e-06,
"loss": 0.0635,
"step": 101
},
{
"epoch": 0.8793103448275862,
"grad_norm": 0.3707881569862366,
"learning_rate": 4.999861077302358e-06,
"loss": 0.0689,
"step": 102
},
{
"epoch": 0.8879310344827587,
"grad_norm": 0.3244622051715851,
"learning_rate": 4.999687427548989e-06,
"loss": 0.0645,
"step": 103
},
{
"epoch": 0.896551724137931,
"grad_norm": 0.386982262134552,
"learning_rate": 4.999444324649045e-06,
"loss": 0.0708,
"step": 104
},
{
"epoch": 0.9051724137931034,
"grad_norm": 0.3552643656730652,
"learning_rate": 4.999131775357078e-06,
"loss": 0.0766,
"step": 105
},
{
"epoch": 0.9137931034482759,
"grad_norm": 0.3503671884536743,
"learning_rate": 4.998749788357184e-06,
"loss": 0.0733,
"step": 106
},
{
"epoch": 0.9224137931034483,
"grad_norm": 0.4907379746437073,
"learning_rate": 4.998298374262771e-06,
"loss": 0.0721,
"step": 107
},
{
"epoch": 0.9310344827586207,
"grad_norm": 0.305571585893631,
"learning_rate": 4.997777545616258e-06,
"loss": 0.0603,
"step": 108
},
{
"epoch": 0.9396551724137931,
"grad_norm": 0.3188783824443817,
"learning_rate": 4.99718731688873e-06,
"loss": 0.0574,
"step": 109
},
{
"epoch": 0.9482758620689655,
"grad_norm": 0.3277616500854492,
"learning_rate": 4.996527704479535e-06,
"loss": 0.0671,
"step": 110
},
{
"epoch": 0.9568965517241379,
"grad_norm": 0.38640740513801575,
"learning_rate": 4.995798726715826e-06,
"loss": 0.0625,
"step": 111
},
{
"epoch": 0.9655172413793104,
"grad_norm": 0.5463483929634094,
"learning_rate": 4.995000403852057e-06,
"loss": 0.0718,
"step": 112
},
{
"epoch": 0.9741379310344828,
"grad_norm": 0.3974014222621918,
"learning_rate": 4.994132758069413e-06,
"loss": 0.071,
"step": 113
},
{
"epoch": 0.9827586206896551,
"grad_norm": 0.3642785847187042,
"learning_rate": 4.993195813475202e-06,
"loss": 0.0658,
"step": 114
},
{
"epoch": 0.9913793103448276,
"grad_norm": 0.3118143677711487,
"learning_rate": 4.992189596102179e-06,
"loss": 0.0633,
"step": 115
},
{
"epoch": 1.0,
"grad_norm": 0.41573596000671387,
"learning_rate": 4.991114133907822e-06,
"loss": 0.0662,
"step": 116
},
{
"epoch": 1.0086206896551724,
"grad_norm": 0.29761484265327454,
"learning_rate": 4.989969456773562e-06,
"loss": 0.0649,
"step": 117
},
{
"epoch": 1.0172413793103448,
"grad_norm": 0.399275004863739,
"learning_rate": 4.988755596503948e-06,
"loss": 0.0521,
"step": 118
},
{
"epoch": 1.0258620689655173,
"grad_norm": 0.37820911407470703,
"learning_rate": 4.987472586825762e-06,
"loss": 0.0596,
"step": 119
},
{
"epoch": 1.0344827586206897,
"grad_norm": 0.4371725916862488,
"learning_rate": 4.986120463387084e-06,
"loss": 0.07,
"step": 120
},
{
"epoch": 1.043103448275862,
"grad_norm": 0.3165535032749176,
"learning_rate": 4.984699263756303e-06,
"loss": 0.0613,
"step": 121
},
{
"epoch": 1.0517241379310345,
"grad_norm": 0.40526920557022095,
"learning_rate": 4.983209027421072e-06,
"loss": 0.0611,
"step": 122
},
{
"epoch": 1.0603448275862069,
"grad_norm": 0.39523962140083313,
"learning_rate": 4.9816497957872055e-06,
"loss": 0.0634,
"step": 123
},
{
"epoch": 1.0689655172413792,
"grad_norm": 0.33234691619873047,
"learning_rate": 4.9800216121775404e-06,
"loss": 0.0602,
"step": 124
},
{
"epoch": 1.0775862068965518,
"grad_norm": 0.3455008268356323,
"learning_rate": 4.978324521830721e-06,
"loss": 0.0624,
"step": 125
},
{
"epoch": 1.0862068965517242,
"grad_norm": 0.3968574106693268,
"learning_rate": 4.97655857189995e-06,
"loss": 0.0525,
"step": 126
},
{
"epoch": 1.0948275862068966,
"grad_norm": 0.47838935256004333,
"learning_rate": 4.974723811451673e-06,
"loss": 0.0617,
"step": 127
},
{
"epoch": 1.103448275862069,
"grad_norm": 0.43055081367492676,
"learning_rate": 4.972820291464219e-06,
"loss": 0.0484,
"step": 128
},
{
"epoch": 1.1120689655172413,
"grad_norm": 0.4012243449687958,
"learning_rate": 4.97084806482638e-06,
"loss": 0.0562,
"step": 129
},
{
"epoch": 1.1206896551724137,
"grad_norm": 0.586875319480896,
"learning_rate": 4.968807186335948e-06,
"loss": 0.0633,
"step": 130
},
{
"epoch": 1.1293103448275863,
"grad_norm": 0.48629340529441833,
"learning_rate": 4.966697712698185e-06,
"loss": 0.0615,
"step": 131
},
{
"epoch": 1.1379310344827587,
"grad_norm": 0.41810542345046997,
"learning_rate": 4.964519702524251e-06,
"loss": 0.0564,
"step": 132
},
{
"epoch": 1.146551724137931,
"grad_norm": 0.4095732271671295,
"learning_rate": 4.962273216329577e-06,
"loss": 0.0634,
"step": 133
},
{
"epoch": 1.1551724137931034,
"grad_norm": 0.3784744441509247,
"learning_rate": 4.959958316532181e-06,
"loss": 0.0521,
"step": 134
},
{
"epoch": 1.1637931034482758,
"grad_norm": 0.4087596833705902,
"learning_rate": 4.957575067450935e-06,
"loss": 0.0569,
"step": 135
},
{
"epoch": 1.1724137931034484,
"grad_norm": 0.4369741380214691,
"learning_rate": 4.955123535303775e-06,
"loss": 0.057,
"step": 136
},
{
"epoch": 1.1810344827586208,
"grad_norm": 0.3851775527000427,
"learning_rate": 4.95260378820587e-06,
"loss": 0.0521,
"step": 137
},
{
"epoch": 1.1896551724137931,
"grad_norm": 0.3391599655151367,
"learning_rate": 4.950015896167716e-06,
"loss": 0.0525,
"step": 138
},
{
"epoch": 1.1982758620689655,
"grad_norm": 0.4176456332206726,
"learning_rate": 4.947359931093202e-06,
"loss": 0.0548,
"step": 139
},
{
"epoch": 1.206896551724138,
"grad_norm": 0.45864036679267883,
"learning_rate": 4.944635966777607e-06,
"loss": 0.0578,
"step": 140
},
{
"epoch": 1.2155172413793103,
"grad_norm": 0.4484093487262726,
"learning_rate": 4.941844078905551e-06,
"loss": 0.0528,
"step": 141
},
{
"epoch": 1.2241379310344827,
"grad_norm": 0.42641302943229675,
"learning_rate": 4.938984345048892e-06,
"loss": 0.0519,
"step": 142
},
{
"epoch": 1.2327586206896552,
"grad_norm": 0.5111292004585266,
"learning_rate": 4.936056844664571e-06,
"loss": 0.0639,
"step": 143
},
{
"epoch": 1.2413793103448276,
"grad_norm": 0.3739636540412903,
"learning_rate": 4.933061659092401e-06,
"loss": 0.055,
"step": 144
},
{
"epoch": 1.25,
"grad_norm": 0.3896700143814087,
"learning_rate": 4.929998871552814e-06,
"loss": 0.0548,
"step": 145
},
{
"epoch": 1.2586206896551724,
"grad_norm": 0.4158782660961151,
"learning_rate": 4.926868567144543e-06,
"loss": 0.0572,
"step": 146
},
{
"epoch": 1.2672413793103448,
"grad_norm": 0.4275132715702057,
"learning_rate": 4.923670832842256e-06,
"loss": 0.0497,
"step": 147
},
{
"epoch": 1.2758620689655173,
"grad_norm": 0.3890449106693268,
"learning_rate": 4.920405757494147e-06,
"loss": 0.0584,
"step": 148
},
{
"epoch": 1.2844827586206897,
"grad_norm": 0.4569760262966156,
"learning_rate": 4.917073431819462e-06,
"loss": 0.0618,
"step": 149
},
{
"epoch": 1.293103448275862,
"grad_norm": 0.4232596457004547,
"learning_rate": 4.913673948405977e-06,
"loss": 0.0516,
"step": 150
},
{
"epoch": 1.3017241379310345,
"grad_norm": 0.4578849673271179,
"learning_rate": 4.910207401707431e-06,
"loss": 0.0538,
"step": 151
},
{
"epoch": 1.3103448275862069,
"grad_norm": 0.39952200651168823,
"learning_rate": 4.906673888040895e-06,
"loss": 0.0471,
"step": 152
},
{
"epoch": 1.3189655172413794,
"grad_norm": 0.38846731185913086,
"learning_rate": 4.903073505584102e-06,
"loss": 0.0437,
"step": 153
},
{
"epoch": 1.3275862068965516,
"grad_norm": 0.4084296226501465,
"learning_rate": 4.899406354372716e-06,
"loss": 0.0532,
"step": 154
},
{
"epoch": 1.3362068965517242,
"grad_norm": 0.4114966094493866,
"learning_rate": 4.895672536297551e-06,
"loss": 0.0443,
"step": 155
},
{
"epoch": 1.3448275862068966,
"grad_norm": 0.6465100049972534,
"learning_rate": 4.891872155101746e-06,
"loss": 0.0605,
"step": 156
},
{
"epoch": 1.353448275862069,
"grad_norm": 0.43234798312187195,
"learning_rate": 4.888005316377873e-06,
"loss": 0.0529,
"step": 157
},
{
"epoch": 1.3620689655172413,
"grad_norm": 0.408527135848999,
"learning_rate": 4.884072127565015e-06,
"loss": 0.0485,
"step": 158
},
{
"epoch": 1.3706896551724137,
"grad_norm": 0.45152318477630615,
"learning_rate": 4.880072697945768e-06,
"loss": 0.0466,
"step": 159
},
{
"epoch": 1.3793103448275863,
"grad_norm": 0.4669966995716095,
"learning_rate": 4.876007138643216e-06,
"loss": 0.0438,
"step": 160
},
{
"epoch": 1.3879310344827587,
"grad_norm": 0.47117388248443604,
"learning_rate": 4.871875562617837e-06,
"loss": 0.0464,
"step": 161
},
{
"epoch": 1.396551724137931,
"grad_norm": 0.6085790991783142,
"learning_rate": 4.867678084664365e-06,
"loss": 0.0574,
"step": 162
},
{
"epoch": 1.4051724137931034,
"grad_norm": 0.5736953616142273,
"learning_rate": 4.863414821408602e-06,
"loss": 0.0507,
"step": 163
},
{
"epoch": 1.4137931034482758,
"grad_norm": 0.5003281831741333,
"learning_rate": 4.8590858913041775e-06,
"loss": 0.051,
"step": 164
},
{
"epoch": 1.4224137931034484,
"grad_norm": 0.4361717104911804,
"learning_rate": 4.854691414629258e-06,
"loss": 0.0476,
"step": 165
},
{
"epoch": 1.4310344827586206,
"grad_norm": 0.47907084226608276,
"learning_rate": 4.8502315134832e-06,
"loss": 0.0495,
"step": 166
},
{
"epoch": 1.4396551724137931,
"grad_norm": 0.46805888414382935,
"learning_rate": 4.8457063117831656e-06,
"loss": 0.0474,
"step": 167
},
{
"epoch": 1.4482758620689655,
"grad_norm": 0.5058819651603699,
"learning_rate": 4.8411159352606735e-06,
"loss": 0.0541,
"step": 168
},
{
"epoch": 1.456896551724138,
"grad_norm": 0.4632813334465027,
"learning_rate": 4.836460511458107e-06,
"loss": 0.0409,
"step": 169
},
{
"epoch": 1.4655172413793103,
"grad_norm": 0.3654536008834839,
"learning_rate": 4.831740169725172e-06,
"loss": 0.0466,
"step": 170
},
{
"epoch": 1.4741379310344827,
"grad_norm": 0.4301295578479767,
"learning_rate": 4.8269550412153e-06,
"loss": 0.0472,
"step": 171
},
{
"epoch": 1.4827586206896552,
"grad_norm": 0.37643712759017944,
"learning_rate": 4.822105258882007e-06,
"loss": 0.0445,
"step": 172
},
{
"epoch": 1.4913793103448276,
"grad_norm": 0.3928871154785156,
"learning_rate": 4.817190957475199e-06,
"loss": 0.0427,
"step": 173
},
{
"epoch": 1.5,
"grad_norm": 0.39636102318763733,
"learning_rate": 4.812212273537426e-06,
"loss": 0.0431,
"step": 174
},
{
"epoch": 1.5086206896551724,
"grad_norm": 0.41640329360961914,
"learning_rate": 4.807169345400088e-06,
"loss": 0.0442,
"step": 175
},
{
"epoch": 1.5172413793103448,
"grad_norm": 0.4888840317726135,
"learning_rate": 4.802062313179595e-06,
"loss": 0.0529,
"step": 176
},
{
"epoch": 1.5258620689655173,
"grad_norm": 0.4154113829135895,
"learning_rate": 4.796891318773472e-06,
"loss": 0.0419,
"step": 177
},
{
"epoch": 1.5344827586206895,
"grad_norm": 0.41872596740722656,
"learning_rate": 4.791656505856416e-06,
"loss": 0.0411,
"step": 178
},
{
"epoch": 1.543103448275862,
"grad_norm": 0.4037671983242035,
"learning_rate": 4.786358019876301e-06,
"loss": 0.0458,
"step": 179
},
{
"epoch": 1.5517241379310345,
"grad_norm": 0.4356297552585602,
"learning_rate": 4.7809960080501464e-06,
"loss": 0.0495,
"step": 180
},
{
"epoch": 1.5603448275862069,
"grad_norm": 0.4112146496772766,
"learning_rate": 4.7755706193600135e-06,
"loss": 0.0367,
"step": 181
},
{
"epoch": 1.5689655172413794,
"grad_norm": 0.36699458956718445,
"learning_rate": 4.770082004548878e-06,
"loss": 0.0427,
"step": 182
},
{
"epoch": 1.5775862068965516,
"grad_norm": 0.49518612027168274,
"learning_rate": 4.764530316116433e-06,
"loss": 0.0334,
"step": 183
},
{
"epoch": 1.5862068965517242,
"grad_norm": 0.3718539774417877,
"learning_rate": 4.758915708314858e-06,
"loss": 0.0367,
"step": 184
},
{
"epoch": 1.5948275862068966,
"grad_norm": 0.46727338433265686,
"learning_rate": 4.753238337144528e-06,
"loss": 0.044,
"step": 185
},
{
"epoch": 1.603448275862069,
"grad_norm": 0.5345709323883057,
"learning_rate": 4.747498360349681e-06,
"loss": 0.0401,
"step": 186
},
{
"epoch": 1.6120689655172413,
"grad_norm": 0.561375617980957,
"learning_rate": 4.7416959374140405e-06,
"loss": 0.0457,
"step": 187
},
{
"epoch": 1.6206896551724137,
"grad_norm": 0.4418468773365021,
"learning_rate": 4.735831229556374e-06,
"loss": 0.0348,
"step": 188
},
{
"epoch": 1.6293103448275863,
"grad_norm": 0.5142704844474792,
"learning_rate": 4.72990439972602e-06,
"loss": 0.0391,
"step": 189
},
{
"epoch": 1.6379310344827587,
"grad_norm": 0.48653194308280945,
"learning_rate": 4.72391561259836e-06,
"loss": 0.0358,
"step": 190
},
{
"epoch": 1.646551724137931,
"grad_norm": 0.48498624563217163,
"learning_rate": 4.717865034570243e-06,
"loss": 0.038,
"step": 191
},
{
"epoch": 1.6551724137931034,
"grad_norm": 0.4586655795574188,
"learning_rate": 4.711752833755362e-06,
"loss": 0.0351,
"step": 192
},
{
"epoch": 1.6637931034482758,
"grad_norm": 0.44023749232292175,
"learning_rate": 4.70557917997958e-06,
"loss": 0.0277,
"step": 193
},
{
"epoch": 1.6724137931034484,
"grad_norm": 0.44018658995628357,
"learning_rate": 4.6993442447762185e-06,
"loss": 0.0382,
"step": 194
},
{
"epoch": 1.6810344827586206,
"grad_norm": 0.49303099513053894,
"learning_rate": 4.693048201381281e-06,
"loss": 0.0413,
"step": 195
},
{
"epoch": 1.6896551724137931,
"grad_norm": 0.572812557220459,
"learning_rate": 4.686691224728652e-06,
"loss": 0.0389,
"step": 196
},
{
"epoch": 1.6982758620689655,
"grad_norm": 0.4995497167110443,
"learning_rate": 4.680273491445227e-06,
"loss": 0.0314,
"step": 197
},
{
"epoch": 1.706896551724138,
"grad_norm": 0.465404212474823,
"learning_rate": 4.673795179846008e-06,
"loss": 0.0323,
"step": 198
},
{
"epoch": 1.7155172413793105,
"grad_norm": 0.45183637738227844,
"learning_rate": 4.667256469929149e-06,
"loss": 0.0308,
"step": 199
},
{
"epoch": 1.7241379310344827,
"grad_norm": 0.4891216456890106,
"learning_rate": 4.660657543370958e-06,
"loss": 0.034,
"step": 200
},
{
"epoch": 1.7327586206896552,
"grad_norm": 0.5728661417961121,
"learning_rate": 4.653998583520844e-06,
"loss": 0.0359,
"step": 201
},
{
"epoch": 1.7413793103448276,
"grad_norm": 0.5465419888496399,
"learning_rate": 4.6472797753962255e-06,
"loss": 0.0366,
"step": 202
},
{
"epoch": 1.75,
"grad_norm": 0.4671058654785156,
"learning_rate": 4.640501305677387e-06,
"loss": 0.0345,
"step": 203
},
{
"epoch": 1.7586206896551724,
"grad_norm": 0.4623485803604126,
"learning_rate": 4.6336633627023e-06,
"loss": 0.0261,
"step": 204
},
{
"epoch": 1.7672413793103448,
"grad_norm": 0.4471631944179535,
"learning_rate": 4.626766136461378e-06,
"loss": 0.0244,
"step": 205
},
{
"epoch": 1.7758620689655173,
"grad_norm": 0.46128782629966736,
"learning_rate": 4.61980981859221e-06,
"loss": 0.0301,
"step": 206
},
{
"epoch": 1.7844827586206895,
"grad_norm": 0.45246005058288574,
"learning_rate": 4.612794602374226e-06,
"loss": 0.0274,
"step": 207
},
{
"epoch": 1.793103448275862,
"grad_norm": 0.5971981287002563,
"learning_rate": 4.605720682723331e-06,
"loss": 0.0387,
"step": 208
},
{
"epoch": 1.8017241379310345,
"grad_norm": 0.4932047128677368,
"learning_rate": 4.598588256186491e-06,
"loss": 0.0228,
"step": 209
},
{
"epoch": 1.8103448275862069,
"grad_norm": 0.6208699345588684,
"learning_rate": 4.591397520936271e-06,
"loss": 0.0307,
"step": 210
},
{
"epoch": 1.8189655172413794,
"grad_norm": 0.5628708004951477,
"learning_rate": 4.584148676765327e-06,
"loss": 0.0262,
"step": 211
},
{
"epoch": 1.8275862068965516,
"grad_norm": 0.6893038749694824,
"learning_rate": 4.576841925080853e-06,
"loss": 0.0312,
"step": 212
},
{
"epoch": 1.8362068965517242,
"grad_norm": 0.5523396134376526,
"learning_rate": 4.569477468898992e-06,
"loss": 0.0218,
"step": 213
},
{
"epoch": 1.8448275862068966,
"grad_norm": 0.6797971129417419,
"learning_rate": 4.562055512839189e-06,
"loss": 0.0232,
"step": 214
},
{
"epoch": 1.853448275862069,
"grad_norm": 0.7214551568031311,
"learning_rate": 4.554576263118506e-06,
"loss": 0.0326,
"step": 215
},
{
"epoch": 1.8620689655172413,
"grad_norm": 0.5208979249000549,
"learning_rate": 4.547039927545899e-06,
"loss": 0.0261,
"step": 216
},
{
"epoch": 1.8706896551724137,
"grad_norm": 0.5396261215209961,
"learning_rate": 4.539446715516434e-06,
"loss": 0.0269,
"step": 217
},
{
"epoch": 1.8793103448275863,
"grad_norm": 0.5789651274681091,
"learning_rate": 4.531796838005477e-06,
"loss": 0.035,
"step": 218
},
{
"epoch": 1.8879310344827587,
"grad_norm": 0.5388063192367554,
"learning_rate": 4.524090507562828e-06,
"loss": 0.0277,
"step": 219
},
{
"epoch": 1.896551724137931,
"grad_norm": 0.4727953374385834,
"learning_rate": 4.516327938306818e-06,
"loss": 0.0225,
"step": 220
},
{
"epoch": 1.9051724137931034,
"grad_norm": 0.4665185809135437,
"learning_rate": 4.508509345918357e-06,
"loss": 0.0268,
"step": 221
},
{
"epoch": 1.9137931034482758,
"grad_norm": 0.5534846186637878,
"learning_rate": 4.500634947634943e-06,
"loss": 0.0277,
"step": 222
},
{
"epoch": 1.9224137931034484,
"grad_norm": 0.46098095178604126,
"learning_rate": 4.492704962244626e-06,
"loss": 0.0274,
"step": 223
},
{
"epoch": 1.9310344827586206,
"grad_norm": 0.49379560351371765,
"learning_rate": 4.4847196100799305e-06,
"loss": 0.0252,
"step": 224
},
{
"epoch": 1.9396551724137931,
"grad_norm": 0.4714183807373047,
"learning_rate": 4.476679113011729e-06,
"loss": 0.0253,
"step": 225
},
{
"epoch": 1.9482758620689655,
"grad_norm": 0.6230431199073792,
"learning_rate": 4.4685836944430815e-06,
"loss": 0.0242,
"step": 226
},
{
"epoch": 1.956896551724138,
"grad_norm": 0.42832595109939575,
"learning_rate": 4.46043357930303e-06,
"loss": 0.0165,
"step": 227
},
{
"epoch": 1.9655172413793105,
"grad_norm": 0.38421332836151123,
"learning_rate": 4.452228994040341e-06,
"loss": 0.0168,
"step": 228
},
{
"epoch": 1.9741379310344827,
"grad_norm": 0.5493133664131165,
"learning_rate": 4.443970166617223e-06,
"loss": 0.0193,
"step": 229
},
{
"epoch": 1.9827586206896552,
"grad_norm": 0.45186540484428406,
"learning_rate": 4.435657326502986e-06,
"loss": 0.0165,
"step": 230
},
{
"epoch": 1.9913793103448276,
"grad_norm": 0.8328711986541748,
"learning_rate": 4.4272907046676704e-06,
"loss": 0.025,
"step": 231
},
{
"epoch": 2.0,
"grad_norm": 0.5378574728965759,
"learning_rate": 4.418870533575626e-06,
"loss": 0.0166,
"step": 232
}
],
"logging_steps": 1,
"max_steps": 696,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 116,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.1971317692995994e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}