{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1943068104537064,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0032,
"grad_norm": 13.684800148010254,
"learning_rate": 0.0,
"loss": 2.3276,
"step": 1
},
{
"epoch": 0.0064,
"grad_norm": 13.660787582397461,
"learning_rate": 4e-05,
"loss": 2.2792,
"step": 2
},
{
"epoch": 0.0096,
"grad_norm": 13.35280704498291,
"learning_rate": 8e-05,
"loss": 2.4151,
"step": 3
},
{
"epoch": 0.0128,
"grad_norm": 6.15027379989624,
"learning_rate": 0.00012,
"loss": 1.7812,
"step": 4
},
{
"epoch": 0.016,
"grad_norm": 1.3168226480484009,
"learning_rate": 0.00016,
"loss": 1.4536,
"step": 5
},
{
"epoch": 0.0192,
"grad_norm": 0.9872580170631409,
"learning_rate": 0.0002,
"loss": 1.4171,
"step": 6
},
{
"epoch": 0.0224,
"grad_norm": 0.7496100664138794,
"learning_rate": 0.00019935064935064936,
"loss": 1.4168,
"step": 7
},
{
"epoch": 0.0256,
"grad_norm": 0.7376005053520203,
"learning_rate": 0.00019870129870129872,
"loss": 1.3659,
"step": 8
},
{
"epoch": 0.0288,
"grad_norm": 0.5281137824058533,
"learning_rate": 0.00019805194805194807,
"loss": 1.2566,
"step": 9
},
{
"epoch": 0.032,
"grad_norm": 0.5485746264457703,
"learning_rate": 0.00019740259740259742,
"loss": 1.3761,
"step": 10
},
{
"epoch": 0.0352,
"grad_norm": 0.5506592392921448,
"learning_rate": 0.00019675324675324675,
"loss": 1.3327,
"step": 11
},
{
"epoch": 0.0384,
"grad_norm": 0.49382686614990234,
"learning_rate": 0.00019610389610389613,
"loss": 1.3727,
"step": 12
},
{
"epoch": 0.0416,
"grad_norm": 0.36203011870384216,
"learning_rate": 0.00019545454545454548,
"loss": 1.1515,
"step": 13
},
{
"epoch": 0.0448,
"grad_norm": 0.3528599739074707,
"learning_rate": 0.0001948051948051948,
"loss": 1.2636,
"step": 14
},
{
"epoch": 0.048,
"grad_norm": 0.31244418025016785,
"learning_rate": 0.00019415584415584416,
"loss": 1.1873,
"step": 15
},
{
"epoch": 0.0512,
"grad_norm": 0.3379523754119873,
"learning_rate": 0.00019350649350649354,
"loss": 1.2657,
"step": 16
},
{
"epoch": 0.0544,
"grad_norm": 0.3025083839893341,
"learning_rate": 0.00019285714285714286,
"loss": 1.2846,
"step": 17
},
{
"epoch": 0.0576,
"grad_norm": 0.2560190260410309,
"learning_rate": 0.00019220779220779222,
"loss": 1.1587,
"step": 18
},
{
"epoch": 0.0608,
"grad_norm": 0.2554129958152771,
"learning_rate": 0.00019155844155844157,
"loss": 1.2812,
"step": 19
},
{
"epoch": 0.064,
"grad_norm": 0.22662702202796936,
"learning_rate": 0.00019090909090909092,
"loss": 1.1664,
"step": 20
},
{
"epoch": 0.0672,
"grad_norm": 0.2515714168548584,
"learning_rate": 0.00019025974025974027,
"loss": 1.2177,
"step": 21
},
{
"epoch": 0.0704,
"grad_norm": 0.24396637082099915,
"learning_rate": 0.00018961038961038963,
"loss": 1.2053,
"step": 22
},
{
"epoch": 0.0736,
"grad_norm": 0.24488303065299988,
"learning_rate": 0.00018896103896103895,
"loss": 1.2074,
"step": 23
},
{
"epoch": 0.0768,
"grad_norm": 0.2168620079755783,
"learning_rate": 0.00018831168831168833,
"loss": 1.1284,
"step": 24
},
{
"epoch": 0.08,
"grad_norm": 0.24021224677562714,
"learning_rate": 0.00018766233766233769,
"loss": 1.2169,
"step": 25
},
{
"epoch": 0.0832,
"grad_norm": 0.20057056844234467,
"learning_rate": 0.000187012987012987,
"loss": 1.1031,
"step": 26
},
{
"epoch": 0.0864,
"grad_norm": 0.19900795817375183,
"learning_rate": 0.00018636363636363636,
"loss": 1.1004,
"step": 27
},
{
"epoch": 0.0896,
"grad_norm": 0.2019268423318863,
"learning_rate": 0.00018571428571428572,
"loss": 1.1476,
"step": 28
},
{
"epoch": 0.0928,
"grad_norm": 0.1996479034423828,
"learning_rate": 0.00018506493506493507,
"loss": 1.1455,
"step": 29
},
{
"epoch": 0.096,
"grad_norm": 0.25262022018432617,
"learning_rate": 0.00018441558441558442,
"loss": 1.1025,
"step": 30
},
{
"epoch": 0.0992,
"grad_norm": 0.225438192486763,
"learning_rate": 0.00018376623376623378,
"loss": 1.1954,
"step": 31
},
{
"epoch": 0.1024,
"grad_norm": 0.17834505438804626,
"learning_rate": 0.00018311688311688313,
"loss": 1.0934,
"step": 32
},
{
"epoch": 0.1056,
"grad_norm": 0.20071206986904144,
"learning_rate": 0.00018246753246753248,
"loss": 1.0488,
"step": 33
},
{
"epoch": 0.1088,
"grad_norm": 0.1920139640569687,
"learning_rate": 0.00018181818181818183,
"loss": 1.123,
"step": 34
},
{
"epoch": 0.112,
"grad_norm": 0.18714852631092072,
"learning_rate": 0.0001811688311688312,
"loss": 1.0798,
"step": 35
},
{
"epoch": 0.1152,
"grad_norm": 0.18315713107585907,
"learning_rate": 0.00018051948051948054,
"loss": 1.1107,
"step": 36
},
{
"epoch": 0.1184,
"grad_norm": 0.19156870245933533,
"learning_rate": 0.00017987012987012987,
"loss": 1.1125,
"step": 37
},
{
"epoch": 0.1216,
"grad_norm": 0.21527768671512604,
"learning_rate": 0.00017922077922077922,
"loss": 1.1346,
"step": 38
},
{
"epoch": 0.1248,
"grad_norm": 0.1871163249015808,
"learning_rate": 0.0001785714285714286,
"loss": 1.0742,
"step": 39
},
{
"epoch": 0.128,
"grad_norm": 0.17750784754753113,
"learning_rate": 0.00017792207792207792,
"loss": 1.1323,
"step": 40
},
{
"epoch": 0.1312,
"grad_norm": 0.177419051527977,
"learning_rate": 0.00017727272727272728,
"loss": 1.1405,
"step": 41
},
{
"epoch": 0.1344,
"grad_norm": 0.16714292764663696,
"learning_rate": 0.00017662337662337663,
"loss": 1.1084,
"step": 42
},
{
"epoch": 0.1376,
"grad_norm": 0.1610356718301773,
"learning_rate": 0.00017597402597402598,
"loss": 1.1125,
"step": 43
},
{
"epoch": 0.1408,
"grad_norm": 0.2548656761646271,
"learning_rate": 0.00017532467532467534,
"loss": 1.1114,
"step": 44
},
{
"epoch": 0.144,
"grad_norm": 0.1731044203042984,
"learning_rate": 0.0001746753246753247,
"loss": 1.1197,
"step": 45
},
{
"epoch": 0.1472,
"grad_norm": 0.1739533394575119,
"learning_rate": 0.00017402597402597401,
"loss": 1.1777,
"step": 46
},
{
"epoch": 0.1504,
"grad_norm": 0.2178352177143097,
"learning_rate": 0.0001733766233766234,
"loss": 1.1111,
"step": 47
},
{
"epoch": 0.1536,
"grad_norm": 0.17247150838375092,
"learning_rate": 0.00017272727272727275,
"loss": 1.1253,
"step": 48
},
{
"epoch": 0.1568,
"grad_norm": 0.18075324594974518,
"learning_rate": 0.00017207792207792207,
"loss": 1.1358,
"step": 49
},
{
"epoch": 0.16,
"grad_norm": 0.15898071229457855,
"learning_rate": 0.00017142857142857143,
"loss": 1.0606,
"step": 50
},
{
"epoch": 0.1632,
"grad_norm": 0.16518613696098328,
"learning_rate": 0.0001707792207792208,
"loss": 1.0944,
"step": 51
},
{
"epoch": 0.1664,
"grad_norm": 0.16035063564777374,
"learning_rate": 0.00017012987012987013,
"loss": 1.0554,
"step": 52
},
{
"epoch": 0.1696,
"grad_norm": 0.1686483472585678,
"learning_rate": 0.00016948051948051948,
"loss": 1.0384,
"step": 53
},
{
"epoch": 0.1728,
"grad_norm": 0.16575631499290466,
"learning_rate": 0.00016883116883116884,
"loss": 1.0243,
"step": 54
},
{
"epoch": 0.176,
"grad_norm": 0.16840039193630219,
"learning_rate": 0.0001681818181818182,
"loss": 1.117,
"step": 55
},
{
"epoch": 0.1792,
"grad_norm": 0.17616064846515656,
"learning_rate": 0.00016753246753246754,
"loss": 1.0743,
"step": 56
},
{
"epoch": 0.1824,
"grad_norm": 0.168218195438385,
"learning_rate": 0.0001668831168831169,
"loss": 1.0627,
"step": 57
},
{
"epoch": 0.1856,
"grad_norm": 0.17026656866073608,
"learning_rate": 0.00016623376623376625,
"loss": 1.0059,
"step": 58
},
{
"epoch": 0.1888,
"grad_norm": 0.16454458236694336,
"learning_rate": 0.0001655844155844156,
"loss": 0.9943,
"step": 59
},
{
"epoch": 0.192,
"grad_norm": 0.17185136675834656,
"learning_rate": 0.00016493506493506495,
"loss": 1.1545,
"step": 60
},
{
"epoch": 0.1952,
"grad_norm": 0.17822986841201782,
"learning_rate": 0.00016428571428571428,
"loss": 1.073,
"step": 61
},
{
"epoch": 0.1984,
"grad_norm": 0.1676608771085739,
"learning_rate": 0.00016363636363636366,
"loss": 1.0886,
"step": 62
},
{
"epoch": 0.2016,
"grad_norm": 0.1727771908044815,
"learning_rate": 0.000162987012987013,
"loss": 1.0432,
"step": 63
},
{
"epoch": 0.2048,
"grad_norm": 0.17827573418617249,
"learning_rate": 0.00016233766233766234,
"loss": 1.083,
"step": 64
},
{
"epoch": 0.208,
"grad_norm": 0.19807517528533936,
"learning_rate": 0.0001616883116883117,
"loss": 1.1208,
"step": 65
},
{
"epoch": 0.2112,
"grad_norm": 0.17693684995174408,
"learning_rate": 0.00016103896103896104,
"loss": 1.089,
"step": 66
},
{
"epoch": 0.2144,
"grad_norm": 0.15489234030246735,
"learning_rate": 0.0001603896103896104,
"loss": 0.9707,
"step": 67
},
{
"epoch": 0.2176,
"grad_norm": 0.16443990170955658,
"learning_rate": 0.00015974025974025975,
"loss": 1.0643,
"step": 68
},
{
"epoch": 0.2208,
"grad_norm": 0.2051103413105011,
"learning_rate": 0.0001590909090909091,
"loss": 1.1246,
"step": 69
},
{
"epoch": 0.224,
"grad_norm": 0.18824075162410736,
"learning_rate": 0.00015844155844155845,
"loss": 1.0855,
"step": 70
},
{
"epoch": 0.2272,
"grad_norm": 0.18659448623657227,
"learning_rate": 0.0001577922077922078,
"loss": 1.1412,
"step": 71
},
{
"epoch": 0.2304,
"grad_norm": 0.1854114979505539,
"learning_rate": 0.00015714285714285716,
"loss": 1.0249,
"step": 72
},
{
"epoch": 0.2336,
"grad_norm": 0.1876193732023239,
"learning_rate": 0.00015649350649350649,
"loss": 1.1029,
"step": 73
},
{
"epoch": 0.2368,
"grad_norm": 0.1888684630393982,
"learning_rate": 0.00015584415584415587,
"loss": 1.0789,
"step": 74
},
{
"epoch": 0.24,
"grad_norm": 0.20240606367588043,
"learning_rate": 0.0001551948051948052,
"loss": 1.0495,
"step": 75
},
{
"epoch": 0.2432,
"grad_norm": 0.232120081782341,
"learning_rate": 0.00015454545454545454,
"loss": 1.0735,
"step": 76
},
{
"epoch": 0.2464,
"grad_norm": 0.16897843778133392,
"learning_rate": 0.0001538961038961039,
"loss": 1.0164,
"step": 77
},
{
"epoch": 0.2496,
"grad_norm": 0.18796634674072266,
"learning_rate": 0.00015324675324675325,
"loss": 1.0676,
"step": 78
},
{
"epoch": 0.2528,
"grad_norm": 0.19574032723903656,
"learning_rate": 0.0001525974025974026,
"loss": 1.0456,
"step": 79
},
{
"epoch": 0.256,
"grad_norm": 0.18007811903953552,
"learning_rate": 0.00015194805194805196,
"loss": 1.0894,
"step": 80
},
{
"epoch": 0.2592,
"grad_norm": 0.18932929635047913,
"learning_rate": 0.0001512987012987013,
"loss": 1.0729,
"step": 81
},
{
"epoch": 0.2624,
"grad_norm": 0.20614288747310638,
"learning_rate": 0.00015064935064935066,
"loss": 1.0854,
"step": 82
},
{
"epoch": 0.2656,
"grad_norm": 0.19291089475154877,
"learning_rate": 0.00015000000000000001,
"loss": 1.1217,
"step": 83
},
{
"epoch": 0.2688,
"grad_norm": 0.18916529417037964,
"learning_rate": 0.00014935064935064934,
"loss": 1.0963,
"step": 84
},
{
"epoch": 0.272,
"grad_norm": 0.20306220650672913,
"learning_rate": 0.00014870129870129872,
"loss": 1.0898,
"step": 85
},
{
"epoch": 0.2752,
"grad_norm": 0.17870067059993744,
"learning_rate": 0.00014805194805194807,
"loss": 1.0213,
"step": 86
},
{
"epoch": 0.2784,
"grad_norm": 0.18411923944950104,
"learning_rate": 0.0001474025974025974,
"loss": 1.0844,
"step": 87
},
{
"epoch": 0.2816,
"grad_norm": 0.18788227438926697,
"learning_rate": 0.00014675324675324675,
"loss": 1.0338,
"step": 88
},
{
"epoch": 0.2848,
"grad_norm": 0.23874884843826294,
"learning_rate": 0.00014610389610389613,
"loss": 1.1118,
"step": 89
},
{
"epoch": 0.288,
"grad_norm": 0.19380499422550201,
"learning_rate": 0.00014545454545454546,
"loss": 1.0464,
"step": 90
},
{
"epoch": 0.2912,
"grad_norm": 0.18968750536441803,
"learning_rate": 0.0001448051948051948,
"loss": 1.0569,
"step": 91
},
{
"epoch": 0.2944,
"grad_norm": 0.19545753300189972,
"learning_rate": 0.00014415584415584416,
"loss": 1.1225,
"step": 92
},
{
"epoch": 0.2976,
"grad_norm": 0.19170494377613068,
"learning_rate": 0.00014350649350649352,
"loss": 1.0602,
"step": 93
},
{
"epoch": 0.3008,
"grad_norm": 0.17953918874263763,
"learning_rate": 0.00014285714285714287,
"loss": 1.032,
"step": 94
},
{
"epoch": 0.304,
"grad_norm": 0.1822536289691925,
"learning_rate": 0.00014220779220779222,
"loss": 1.0559,
"step": 95
},
{
"epoch": 0.3072,
"grad_norm": 0.18591298162937164,
"learning_rate": 0.00014155844155844155,
"loss": 1.031,
"step": 96
},
{
"epoch": 0.3104,
"grad_norm": 0.2129002958536148,
"learning_rate": 0.00014090909090909093,
"loss": 1.1391,
"step": 97
},
{
"epoch": 0.3136,
"grad_norm": 0.18386681377887726,
"learning_rate": 0.00014025974025974028,
"loss": 0.9919,
"step": 98
},
{
"epoch": 0.3168,
"grad_norm": 0.18314239382743835,
"learning_rate": 0.0001396103896103896,
"loss": 1.0445,
"step": 99
},
{
"epoch": 0.32,
"grad_norm": 0.1999066174030304,
"learning_rate": 0.00013896103896103896,
"loss": 1.0538,
"step": 100
},
{
"epoch": 0.3232,
"grad_norm": 0.18741188943386078,
"learning_rate": 0.00013831168831168834,
"loss": 1.0722,
"step": 101
},
{
"epoch": 0.3264,
"grad_norm": 0.19351010024547577,
"learning_rate": 0.00013766233766233766,
"loss": 1.0491,
"step": 102
},
{
"epoch": 0.3296,
"grad_norm": 0.18859203159809113,
"learning_rate": 0.00013701298701298702,
"loss": 1.0593,
"step": 103
},
{
"epoch": 0.3328,
"grad_norm": 0.1962767392396927,
"learning_rate": 0.00013636363636363637,
"loss": 1.1344,
"step": 104
},
{
"epoch": 0.336,
"grad_norm": 0.20819440484046936,
"learning_rate": 0.00013571428571428572,
"loss": 1.1137,
"step": 105
},
{
"epoch": 0.3392,
"grad_norm": 0.19590184092521667,
"learning_rate": 0.00013506493506493507,
"loss": 1.0624,
"step": 106
},
{
"epoch": 0.3424,
"grad_norm": 0.18631424009799957,
"learning_rate": 0.00013441558441558443,
"loss": 1.0587,
"step": 107
},
{
"epoch": 0.3456,
"grad_norm": 0.19572143256664276,
"learning_rate": 0.00013376623376623375,
"loss": 1.0494,
"step": 108
},
{
"epoch": 0.3488,
"grad_norm": 0.1910988837480545,
"learning_rate": 0.00013311688311688313,
"loss": 1.0481,
"step": 109
},
{
"epoch": 0.352,
"grad_norm": 0.19455869495868683,
"learning_rate": 0.00013246753246753249,
"loss": 1.029,
"step": 110
},
{
"epoch": 0.3552,
"grad_norm": 0.18669827282428741,
"learning_rate": 0.0001318181818181818,
"loss": 1.0513,
"step": 111
},
{
"epoch": 0.3584,
"grad_norm": 0.17523664236068726,
"learning_rate": 0.0001311688311688312,
"loss": 1.0126,
"step": 112
},
{
"epoch": 0.3616,
"grad_norm": 0.17929129302501678,
"learning_rate": 0.00013051948051948052,
"loss": 1.0717,
"step": 113
},
{
"epoch": 0.3648,
"grad_norm": 0.19380168616771698,
"learning_rate": 0.00012987012987012987,
"loss": 1.0324,
"step": 114
},
{
"epoch": 0.368,
"grad_norm": 0.18090228736400604,
"learning_rate": 0.00012922077922077922,
"loss": 1.0515,
"step": 115
},
{
"epoch": 0.3712,
"grad_norm": 0.2067340910434723,
"learning_rate": 0.00012857142857142858,
"loss": 1.0939,
"step": 116
},
{
"epoch": 0.3744,
"grad_norm": 0.1880485862493515,
"learning_rate": 0.00012792207792207793,
"loss": 1.0986,
"step": 117
},
{
"epoch": 0.3776,
"grad_norm": 0.182168647646904,
"learning_rate": 0.00012727272727272728,
"loss": 1.0109,
"step": 118
},
{
"epoch": 0.3808,
"grad_norm": 0.20187129080295563,
"learning_rate": 0.00012662337662337663,
"loss": 1.0668,
"step": 119
},
{
"epoch": 0.384,
"grad_norm": 0.2082669734954834,
"learning_rate": 0.000125974025974026,
"loss": 1.054,
"step": 120
},
{
"epoch": 0.3872,
"grad_norm": 0.18294434249401093,
"learning_rate": 0.00012532467532467534,
"loss": 1.0397,
"step": 121
},
{
"epoch": 0.3904,
"grad_norm": 0.20515067875385284,
"learning_rate": 0.00012467532467532467,
"loss": 1.1092,
"step": 122
},
{
"epoch": 0.3936,
"grad_norm": 0.1758790761232376,
"learning_rate": 0.00012402597402597402,
"loss": 0.9755,
"step": 123
},
{
"epoch": 0.3968,
"grad_norm": 0.2170792669057846,
"learning_rate": 0.0001233766233766234,
"loss": 1.0434,
"step": 124
},
{
"epoch": 0.4,
"grad_norm": 0.202157124876976,
"learning_rate": 0.00012272727272727272,
"loss": 1.1129,
"step": 125
},
{
"epoch": 0.4032,
"grad_norm": 0.18556398153305054,
"learning_rate": 0.00012207792207792208,
"loss": 1.0665,
"step": 126
},
{
"epoch": 0.4064,
"grad_norm": 0.20196087658405304,
"learning_rate": 0.00012142857142857143,
"loss": 1.1,
"step": 127
},
{
"epoch": 0.4096,
"grad_norm": 0.1921566128730774,
"learning_rate": 0.0001207792207792208,
"loss": 1.0918,
"step": 128
},
{
"epoch": 0.4128,
"grad_norm": 0.18866224586963654,
"learning_rate": 0.00012012987012987014,
"loss": 1.0014,
"step": 129
},
{
"epoch": 0.416,
"grad_norm": 0.207601398229599,
"learning_rate": 0.00011948051948051949,
"loss": 1.0726,
"step": 130
},
{
"epoch": 0.4192,
"grad_norm": 0.21592366695404053,
"learning_rate": 0.00011883116883116883,
"loss": 1.1379,
"step": 131
},
{
"epoch": 0.4224,
"grad_norm": 0.2016124576330185,
"learning_rate": 0.0001181818181818182,
"loss": 1.1428,
"step": 132
},
{
"epoch": 0.4256,
"grad_norm": 0.20478437840938568,
"learning_rate": 0.00011753246753246753,
"loss": 1.121,
"step": 133
},
{
"epoch": 0.4288,
"grad_norm": 0.22730594873428345,
"learning_rate": 0.00011688311688311689,
"loss": 1.0319,
"step": 134
},
{
"epoch": 0.432,
"grad_norm": 0.22592711448669434,
"learning_rate": 0.00011623376623376625,
"loss": 1.1264,
"step": 135
},
{
"epoch": 0.4352,
"grad_norm": 0.20035041868686676,
"learning_rate": 0.00011558441558441559,
"loss": 1.0686,
"step": 136
},
{
"epoch": 0.4384,
"grad_norm": 0.20648567378520966,
"learning_rate": 0.00011493506493506494,
"loss": 1.0817,
"step": 137
},
{
"epoch": 0.4416,
"grad_norm": 0.21222743391990662,
"learning_rate": 0.00011428571428571428,
"loss": 1.0678,
"step": 138
},
{
"epoch": 0.4448,
"grad_norm": 0.2075391560792923,
"learning_rate": 0.00011363636363636365,
"loss": 1.0897,
"step": 139
},
{
"epoch": 0.448,
"grad_norm": 0.1964101791381836,
"learning_rate": 0.000112987012987013,
"loss": 1.0906,
"step": 140
},
{
"epoch": 0.4512,
"grad_norm": 0.22406511008739471,
"learning_rate": 0.00011233766233766234,
"loss": 1.0594,
"step": 141
},
{
"epoch": 0.4544,
"grad_norm": 0.23787978291511536,
"learning_rate": 0.00011168831168831168,
"loss": 1.1053,
"step": 142
},
{
"epoch": 0.4576,
"grad_norm": 0.21196185052394867,
"learning_rate": 0.00011103896103896105,
"loss": 1.0923,
"step": 143
},
{
"epoch": 0.4608,
"grad_norm": 0.21042804419994354,
"learning_rate": 0.0001103896103896104,
"loss": 1.0381,
"step": 144
},
{
"epoch": 0.464,
"grad_norm": 0.2267436534166336,
"learning_rate": 0.00010974025974025974,
"loss": 1.0818,
"step": 145
},
{
"epoch": 0.4672,
"grad_norm": 0.23742735385894775,
"learning_rate": 0.00010909090909090909,
"loss": 1.0872,
"step": 146
},
{
"epoch": 0.4704,
"grad_norm": 0.17787213623523712,
"learning_rate": 0.00010844155844155846,
"loss": 1.03,
"step": 147
},
{
"epoch": 0.4736,
"grad_norm": 0.22422832250595093,
"learning_rate": 0.0001077922077922078,
"loss": 1.0738,
"step": 148
},
{
"epoch": 0.4768,
"grad_norm": 0.22946301102638245,
"learning_rate": 0.00010714285714285715,
"loss": 1.0274,
"step": 149
},
{
"epoch": 0.48,
"grad_norm": 0.2137996405363083,
"learning_rate": 0.00010649350649350649,
"loss": 1.0539,
"step": 150
},
{
"epoch": 0.4832,
"grad_norm": 0.1748756766319275,
"learning_rate": 0.00010584415584415586,
"loss": 1.0355,
"step": 151
},
{
"epoch": 0.4864,
"grad_norm": 0.22275175154209137,
"learning_rate": 0.0001051948051948052,
"loss": 1.1696,
"step": 152
},
{
"epoch": 0.4896,
"grad_norm": 0.20996077358722687,
"learning_rate": 0.00010454545454545455,
"loss": 1.0303,
"step": 153
},
{
"epoch": 0.4928,
"grad_norm": 0.1945938766002655,
"learning_rate": 0.00010389610389610389,
"loss": 0.9747,
"step": 154
},
{
"epoch": 0.496,
"grad_norm": 0.1970377266407013,
"learning_rate": 0.00010324675324675325,
"loss": 1.0358,
"step": 155
},
{
"epoch": 0.4992,
"grad_norm": 0.18814732134342194,
"learning_rate": 0.00010259740259740261,
"loss": 0.9612,
"step": 156
},
{
"epoch": 0.5024,
"grad_norm": 0.2153233289718628,
"learning_rate": 0.00010194805194805195,
"loss": 1.0749,
"step": 157
},
{
"epoch": 0.5056,
"grad_norm": 0.21788008511066437,
"learning_rate": 0.0001012987012987013,
"loss": 1.0883,
"step": 158
},
{
"epoch": 0.5088,
"grad_norm": 0.214650496840477,
"learning_rate": 0.00010064935064935067,
"loss": 1.0539,
"step": 159
},
{
"epoch": 0.512,
"grad_norm": 0.19312834739685059,
"learning_rate": 0.0001,
"loss": 1.0657,
"step": 160
},
{
"epoch": 0.5152,
"grad_norm": 0.19916598498821259,
"learning_rate": 9.935064935064936e-05,
"loss": 1.0478,
"step": 161
},
{
"epoch": 0.5184,
"grad_norm": 0.2057606726884842,
"learning_rate": 9.870129870129871e-05,
"loss": 1.0094,
"step": 162
},
{
"epoch": 0.5216,
"grad_norm": 0.22159607708454132,
"learning_rate": 9.805194805194806e-05,
"loss": 1.0952,
"step": 163
},
{
"epoch": 0.5248,
"grad_norm": 0.18274275958538055,
"learning_rate": 9.74025974025974e-05,
"loss": 1.0065,
"step": 164
},
{
"epoch": 0.528,
"grad_norm": 0.19835162162780762,
"learning_rate": 9.675324675324677e-05,
"loss": 1.0742,
"step": 165
},
{
"epoch": 0.5312,
"grad_norm": 0.2114904820919037,
"learning_rate": 9.610389610389611e-05,
"loss": 1.1109,
"step": 166
},
{
"epoch": 0.5344,
"grad_norm": 0.21488523483276367,
"learning_rate": 9.545454545454546e-05,
"loss": 1.0465,
"step": 167
},
{
"epoch": 0.5376,
"grad_norm": 0.19870303571224213,
"learning_rate": 9.480519480519481e-05,
"loss": 1.0318,
"step": 168
},
{
"epoch": 0.5408,
"grad_norm": 0.20413029193878174,
"learning_rate": 9.415584415584417e-05,
"loss": 1.0817,
"step": 169
},
{
"epoch": 0.544,
"grad_norm": 0.1847231239080429,
"learning_rate": 9.35064935064935e-05,
"loss": 1.0144,
"step": 170
},
{
"epoch": 0.5472,
"grad_norm": 0.2715964913368225,
"learning_rate": 9.285714285714286e-05,
"loss": 0.9832,
"step": 171
},
{
"epoch": 0.5504,
"grad_norm": 0.2225002497434616,
"learning_rate": 9.220779220779221e-05,
"loss": 1.1051,
"step": 172
},
{
"epoch": 0.5536,
"grad_norm": 0.22931510210037231,
"learning_rate": 9.155844155844156e-05,
"loss": 1.1042,
"step": 173
},
{
"epoch": 0.5568,
"grad_norm": 0.21848627924919128,
"learning_rate": 9.090909090909092e-05,
"loss": 1.1151,
"step": 174
},
{
"epoch": 0.56,
"grad_norm": 0.19852259755134583,
"learning_rate": 9.025974025974027e-05,
"loss": 1.0889,
"step": 175
},
{
"epoch": 0.5632,
"grad_norm": 0.2080363780260086,
"learning_rate": 8.961038961038961e-05,
"loss": 1.0777,
"step": 176
},
{
"epoch": 0.5664,
"grad_norm": 0.22391024231910706,
"learning_rate": 8.896103896103896e-05,
"loss": 1.1092,
"step": 177
},
{
"epoch": 0.5696,
"grad_norm": 0.21793846786022186,
"learning_rate": 8.831168831168831e-05,
"loss": 1.044,
"step": 178
},
{
"epoch": 0.5728,
"grad_norm": 0.2009749859571457,
"learning_rate": 8.766233766233767e-05,
"loss": 1.0198,
"step": 179
},
{
"epoch": 0.576,
"grad_norm": 0.19432318210601807,
"learning_rate": 8.701298701298701e-05,
"loss": 1.075,
"step": 180
},
{
"epoch": 0.5792,
"grad_norm": 0.18634547293186188,
"learning_rate": 8.636363636363637e-05,
"loss": 0.9964,
"step": 181
},
{
"epoch": 0.5824,
"grad_norm": 0.1947103589773178,
"learning_rate": 8.571428571428571e-05,
"loss": 1.0025,
"step": 182
},
{
"epoch": 0.5856,
"grad_norm": 0.23098671436309814,
"learning_rate": 8.506493506493507e-05,
"loss": 1.0562,
"step": 183
},
{
"epoch": 0.5888,
"grad_norm": 0.19686414301395416,
"learning_rate": 8.441558441558442e-05,
"loss": 1.0285,
"step": 184
},
{
"epoch": 0.592,
"grad_norm": 0.19852428138256073,
"learning_rate": 8.376623376623377e-05,
"loss": 1.0054,
"step": 185
},
{
"epoch": 0.5952,
"grad_norm": 0.21483510732650757,
"learning_rate": 8.311688311688312e-05,
"loss": 1.108,
"step": 186
},
{
"epoch": 0.5984,
"grad_norm": 0.23313644528388977,
"learning_rate": 8.246753246753248e-05,
"loss": 1.1383,
"step": 187
},
{
"epoch": 0.6016,
"grad_norm": 0.21453145146369934,
"learning_rate": 8.181818181818183e-05,
"loss": 1.0911,
"step": 188
},
{
"epoch": 0.6048,
"grad_norm": 0.20268195867538452,
"learning_rate": 8.116883116883117e-05,
"loss": 1.0145,
"step": 189
},
{
"epoch": 0.608,
"grad_norm": 0.20576398074626923,
"learning_rate": 8.051948051948052e-05,
"loss": 1.0829,
"step": 190
},
{
"epoch": 0.6112,
"grad_norm": 0.21732626855373383,
"learning_rate": 7.987012987012987e-05,
"loss": 1.0152,
"step": 191
},
{
"epoch": 0.6144,
"grad_norm": 0.22046895325183868,
"learning_rate": 7.922077922077923e-05,
"loss": 1.1311,
"step": 192
},
{
"epoch": 0.6176,
"grad_norm": 0.19727715849876404,
"learning_rate": 7.857142857142858e-05,
"loss": 1.0364,
"step": 193
},
{
"epoch": 0.6208,
"grad_norm": 0.20861488580703735,
"learning_rate": 7.792207792207793e-05,
"loss": 1.0435,
"step": 194
},
{
"epoch": 0.624,
"grad_norm": 0.18545083701610565,
"learning_rate": 7.727272727272727e-05,
"loss": 1.0299,
"step": 195
},
{
"epoch": 0.6272,
"grad_norm": 0.19965052604675293,
"learning_rate": 7.662337662337662e-05,
"loss": 1.0511,
"step": 196
},
{
"epoch": 0.6304,
"grad_norm": 0.23673909902572632,
"learning_rate": 7.597402597402598e-05,
"loss": 1.081,
"step": 197
},
{
"epoch": 0.6336,
"grad_norm": 0.17583179473876953,
"learning_rate": 7.532467532467533e-05,
"loss": 0.9808,
"step": 198
},
{
"epoch": 0.6368,
"grad_norm": 0.2129366099834442,
"learning_rate": 7.467532467532467e-05,
"loss": 1.0522,
"step": 199
},
{
"epoch": 0.64,
"grad_norm": 0.21679140627384186,
"learning_rate": 7.402597402597404e-05,
"loss": 1.0567,
"step": 200
},
{
"epoch": 0.6432,
"grad_norm": 0.2032000720500946,
"learning_rate": 7.337662337662338e-05,
"loss": 1.0466,
"step": 201
},
{
"epoch": 0.6464,
"grad_norm": 0.1887970268726349,
"learning_rate": 7.272727272727273e-05,
"loss": 1.0329,
"step": 202
},
{
"epoch": 0.6496,
"grad_norm": 0.21060192584991455,
"learning_rate": 7.207792207792208e-05,
"loss": 1.1021,
"step": 203
},
{
"epoch": 0.6528,
"grad_norm": 0.21191425621509552,
"learning_rate": 7.142857142857143e-05,
"loss": 0.99,
"step": 204
},
{
"epoch": 0.656,
"grad_norm": 0.1995989829301834,
"learning_rate": 7.077922077922077e-05,
"loss": 1.0526,
"step": 205
},
{
"epoch": 0.6592,
"grad_norm": 0.1849513053894043,
"learning_rate": 7.012987012987014e-05,
"loss": 0.9998,
"step": 206
},
{
"epoch": 0.6624,
"grad_norm": 0.1948779672384262,
"learning_rate": 6.948051948051948e-05,
"loss": 1.075,
"step": 207
},
{
"epoch": 0.6656,
"grad_norm": 0.20374052226543427,
"learning_rate": 6.883116883116883e-05,
"loss": 1.0933,
"step": 208
},
{
"epoch": 0.6688,
"grad_norm": 0.2102465033531189,
"learning_rate": 6.818181818181818e-05,
"loss": 1.1123,
"step": 209
},
{
"epoch": 0.672,
"grad_norm": 0.21376173198223114,
"learning_rate": 6.753246753246754e-05,
"loss": 1.1233,
"step": 210
},
{
"epoch": 0.6752,
"grad_norm": 0.20934203267097473,
"learning_rate": 6.688311688311688e-05,
"loss": 1.1374,
"step": 211
},
{
"epoch": 0.6784,
"grad_norm": 0.18604128062725067,
"learning_rate": 6.623376623376624e-05,
"loss": 1.0213,
"step": 212
},
{
"epoch": 0.6816,
"grad_norm": 0.19644233584403992,
"learning_rate": 6.55844155844156e-05,
"loss": 1.0046,
"step": 213
},
{
"epoch": 0.6848,
"grad_norm": 0.18479463458061218,
"learning_rate": 6.493506493506494e-05,
"loss": 0.9792,
"step": 214
},
{
"epoch": 0.688,
"grad_norm": 0.1945149153470993,
"learning_rate": 6.428571428571429e-05,
"loss": 1.0584,
"step": 215
},
{
"epoch": 0.6912,
"grad_norm": 0.2070147544145584,
"learning_rate": 6.363636363636364e-05,
"loss": 1.071,
"step": 216
},
{
"epoch": 0.6944,
"grad_norm": 0.19645985960960388,
"learning_rate": 6.2987012987013e-05,
"loss": 1.0721,
"step": 217
},
{
"epoch": 0.6976,
"grad_norm": 0.1960117667913437,
"learning_rate": 6.233766233766233e-05,
"loss": 1.071,
"step": 218
},
{
"epoch": 0.7008,
"grad_norm": 0.20168261229991913,
"learning_rate": 6.16883116883117e-05,
"loss": 1.0808,
"step": 219
},
{
"epoch": 0.704,
"grad_norm": 0.21254412829875946,
"learning_rate": 6.103896103896104e-05,
"loss": 1.0287,
"step": 220
},
{
"epoch": 0.7072,
"grad_norm": 0.21271063387393951,
"learning_rate": 6.03896103896104e-05,
"loss": 1.0605,
"step": 221
},
{
"epoch": 0.7104,
"grad_norm": 0.2081408053636551,
"learning_rate": 5.9740259740259744e-05,
"loss": 1.091,
"step": 222
},
{
"epoch": 0.7136,
"grad_norm": 0.21113798022270203,
"learning_rate": 5.90909090909091e-05,
"loss": 1.1323,
"step": 223
},
{
"epoch": 0.7168,
"grad_norm": 0.20670844614505768,
"learning_rate": 5.844155844155844e-05,
"loss": 1.0955,
"step": 224
},
{
"epoch": 0.72,
"grad_norm": 0.2010120451450348,
"learning_rate": 5.7792207792207796e-05,
"loss": 1.1068,
"step": 225
},
{
"epoch": 0.7232,
"grad_norm": 0.20379121601581573,
"learning_rate": 5.714285714285714e-05,
"loss": 1.0419,
"step": 226
},
{
"epoch": 0.7264,
"grad_norm": 0.22799807786941528,
"learning_rate": 5.64935064935065e-05,
"loss": 1.0904,
"step": 227
},
{
"epoch": 0.7296,
"grad_norm": 0.2005995213985443,
"learning_rate": 5.584415584415584e-05,
"loss": 1.078,
"step": 228
},
{
"epoch": 0.7328,
"grad_norm": 0.20329605042934418,
"learning_rate": 5.51948051948052e-05,
"loss": 1.0245,
"step": 229
},
{
"epoch": 0.736,
"grad_norm": 0.19283504784107208,
"learning_rate": 5.4545454545454546e-05,
"loss": 1.0367,
"step": 230
},
{
"epoch": 0.7392,
"grad_norm": 0.20624355971813202,
"learning_rate": 5.38961038961039e-05,
"loss": 1.1046,
"step": 231
},
{
"epoch": 0.7424,
"grad_norm": 0.21362991631031036,
"learning_rate": 5.3246753246753245e-05,
"loss": 1.1104,
"step": 232
},
{
"epoch": 0.7456,
"grad_norm": 0.20447863638401031,
"learning_rate": 5.25974025974026e-05,
"loss": 1.0514,
"step": 233
},
{
"epoch": 0.7488,
"grad_norm": 0.1974381059408188,
"learning_rate": 5.1948051948051944e-05,
"loss": 1.0048,
"step": 234
},
{
"epoch": 0.752,
"grad_norm": 0.21237170696258545,
"learning_rate": 5.1298701298701304e-05,
"loss": 1.1299,
"step": 235
},
{
"epoch": 0.7552,
"grad_norm": 0.21224971115589142,
"learning_rate": 5.064935064935065e-05,
"loss": 1.05,
"step": 236
},
{
"epoch": 0.7584,
"grad_norm": 0.19865018129348755,
"learning_rate": 5e-05,
"loss": 1.0665,
"step": 237
},
{
"epoch": 0.7616,
"grad_norm": 0.19199275970458984,
"learning_rate": 4.9350649350649355e-05,
"loss": 0.9531,
"step": 238
},
{
"epoch": 0.7648,
"grad_norm": 0.19573214650154114,
"learning_rate": 4.87012987012987e-05,
"loss": 1.0318,
"step": 239
},
{
"epoch": 0.768,
"grad_norm": 0.21338805556297302,
"learning_rate": 4.8051948051948054e-05,
"loss": 1.0343,
"step": 240
},
{
"epoch": 0.7712,
"grad_norm": 0.2254691869020462,
"learning_rate": 4.740259740259741e-05,
"loss": 1.0472,
"step": 241
},
{
"epoch": 0.7744,
"grad_norm": 0.18101665377616882,
"learning_rate": 4.675324675324675e-05,
"loss": 1.017,
"step": 242
},
{
"epoch": 0.7776,
"grad_norm": 0.22090592980384827,
"learning_rate": 4.6103896103896106e-05,
"loss": 1.0389,
"step": 243
},
{
"epoch": 0.7808,
"grad_norm": 0.20865507423877716,
"learning_rate": 4.545454545454546e-05,
"loss": 1.0369,
"step": 244
},
{
"epoch": 0.784,
"grad_norm": 0.21619610488414764,
"learning_rate": 4.4805194805194805e-05,
"loss": 1.109,
"step": 245
},
{
"epoch": 0.7872,
"grad_norm": 0.21694771945476532,
"learning_rate": 4.415584415584416e-05,
"loss": 1.0525,
"step": 246
},
{
"epoch": 0.7904,
"grad_norm": 0.2182662934064865,
"learning_rate": 4.3506493506493503e-05,
"loss": 1.0331,
"step": 247
},
{
"epoch": 0.7936,
"grad_norm": 0.2026486098766327,
"learning_rate": 4.2857142857142856e-05,
"loss": 1.027,
"step": 248
},
{
"epoch": 0.7968,
"grad_norm": 0.19606547057628632,
"learning_rate": 4.220779220779221e-05,
"loss": 1.0242,
"step": 249
},
{
"epoch": 0.8,
"grad_norm": 0.22107470035552979,
"learning_rate": 4.155844155844156e-05,
"loss": 1.0924,
"step": 250
},
{
"epoch": 0.8032,
"grad_norm": 0.19960008561611176,
"learning_rate": 4.0909090909090915e-05,
"loss": 1.0384,
"step": 251
},
{
"epoch": 0.8064,
"grad_norm": 0.1945488154888153,
"learning_rate": 4.025974025974026e-05,
"loss": 1.0673,
"step": 252
},
{
"epoch": 0.8096,
"grad_norm": 0.22067414224147797,
"learning_rate": 3.9610389610389614e-05,
"loss": 1.0426,
"step": 253
},
{
"epoch": 0.8128,
"grad_norm": 0.19010980427265167,
"learning_rate": 3.8961038961038966e-05,
"loss": 1.0617,
"step": 254
},
{
"epoch": 0.816,
"grad_norm": 0.18781176209449768,
"learning_rate": 3.831168831168831e-05,
"loss": 1.0243,
"step": 255
},
{
"epoch": 0.8192,
"grad_norm": 0.20388829708099365,
"learning_rate": 3.7662337662337665e-05,
"loss": 1.0476,
"step": 256
},
{
"epoch": 0.8224,
"grad_norm": 0.19911155104637146,
"learning_rate": 3.701298701298702e-05,
"loss": 1.0324,
"step": 257
},
{
"epoch": 0.8256,
"grad_norm": 0.19884039461612701,
"learning_rate": 3.6363636363636364e-05,
"loss": 1.0242,
"step": 258
},
{
"epoch": 0.8288,
"grad_norm": 0.19036105275154114,
"learning_rate": 3.571428571428572e-05,
"loss": 1.0323,
"step": 259
},
{
"epoch": 0.832,
"grad_norm": 0.20039844512939453,
"learning_rate": 3.506493506493507e-05,
"loss": 1.0749,
"step": 260
},
{
"epoch": 0.8352,
"grad_norm": 0.1899934560060501,
"learning_rate": 3.4415584415584416e-05,
"loss": 1.0115,
"step": 261
},
{
"epoch": 0.8384,
"grad_norm": 0.20019090175628662,
"learning_rate": 3.376623376623377e-05,
"loss": 1.0782,
"step": 262
},
{
"epoch": 0.8416,
"grad_norm": 0.2020583152770996,
"learning_rate": 3.311688311688312e-05,
"loss": 1.0687,
"step": 263
},
{
"epoch": 0.8448,
"grad_norm": 0.21407337486743927,
"learning_rate": 3.246753246753247e-05,
"loss": 1.1015,
"step": 264
},
{
"epoch": 0.848,
"grad_norm": 0.1871640682220459,
"learning_rate": 3.181818181818182e-05,
"loss": 0.9637,
"step": 265
},
{
"epoch": 0.8512,
"grad_norm": 0.21622811257839203,
"learning_rate": 3.1168831168831166e-05,
"loss": 1.1222,
"step": 266
},
{
"epoch": 0.8544,
"grad_norm": 0.22504661977291107,
"learning_rate": 3.051948051948052e-05,
"loss": 1.132,
"step": 267
},
{
"epoch": 0.8576,
"grad_norm": 0.19177629053592682,
"learning_rate": 2.9870129870129872e-05,
"loss": 1.0281,
"step": 268
},
{
"epoch": 0.8608,
"grad_norm": 0.1970544159412384,
"learning_rate": 2.922077922077922e-05,
"loss": 1.0393,
"step": 269
},
{
"epoch": 0.864,
"grad_norm": 0.21554522216320038,
"learning_rate": 2.857142857142857e-05,
"loss": 1.074,
"step": 270
},
{
"epoch": 0.8672,
"grad_norm": 0.21131229400634766,
"learning_rate": 2.792207792207792e-05,
"loss": 1.054,
"step": 271
},
{
"epoch": 0.8704,
"grad_norm": 0.19816523790359497,
"learning_rate": 2.7272727272727273e-05,
"loss": 1.0456,
"step": 272
},
{
"epoch": 0.8736,
"grad_norm": 0.21075209975242615,
"learning_rate": 2.6623376623376623e-05,
"loss": 1.0758,
"step": 273
},
{
"epoch": 0.8768,
"grad_norm": 0.2296527624130249,
"learning_rate": 2.5974025974025972e-05,
"loss": 1.0917,
"step": 274
},
{
"epoch": 0.88,
"grad_norm": 0.19722610712051392,
"learning_rate": 2.5324675324675325e-05,
"loss": 1.0704,
"step": 275
},
{
"epoch": 0.8832,
"grad_norm": 0.18721099197864532,
"learning_rate": 2.4675324675324678e-05,
"loss": 0.9919,
"step": 276
},
{
"epoch": 0.8864,
"grad_norm": 0.20244193077087402,
"learning_rate": 2.4025974025974027e-05,
"loss": 1.0368,
"step": 277
},
{
"epoch": 0.8896,
"grad_norm": 0.19518914818763733,
"learning_rate": 2.3376623376623376e-05,
"loss": 1.0436,
"step": 278
},
{
"epoch": 0.8928,
"grad_norm": 0.19650357961654663,
"learning_rate": 2.272727272727273e-05,
"loss": 1.0306,
"step": 279
},
{
"epoch": 0.896,
"grad_norm": 0.20320096611976624,
"learning_rate": 2.207792207792208e-05,
"loss": 1.0941,
"step": 280
},
{
"epoch": 0.8992,
"grad_norm": 0.18296951055526733,
"learning_rate": 2.1428571428571428e-05,
"loss": 0.9802,
"step": 281
},
{
"epoch": 0.9024,
"grad_norm": 0.21357610821723938,
"learning_rate": 2.077922077922078e-05,
"loss": 1.0449,
"step": 282
},
{
"epoch": 0.9056,
"grad_norm": 0.193921759724617,
"learning_rate": 2.012987012987013e-05,
"loss": 1.0116,
"step": 283
},
{
"epoch": 0.9088,
"grad_norm": 0.1953902244567871,
"learning_rate": 1.9480519480519483e-05,
"loss": 1.0105,
"step": 284
},
{
"epoch": 0.912,
"grad_norm": 0.19440975785255432,
"learning_rate": 1.8831168831168833e-05,
"loss": 0.9952,
"step": 285
},
{
"epoch": 0.9152,
"grad_norm": 0.21054105460643768,
"learning_rate": 1.8181818181818182e-05,
"loss": 1.0701,
"step": 286
},
{
"epoch": 0.9184,
"grad_norm": 0.18844804167747498,
"learning_rate": 1.7532467532467535e-05,
"loss": 1.0146,
"step": 287
},
{
"epoch": 0.9216,
"grad_norm": 0.2067311704158783,
"learning_rate": 1.6883116883116884e-05,
"loss": 1.0781,
"step": 288
},
{
"epoch": 0.9248,
"grad_norm": 0.1941213756799698,
"learning_rate": 1.6233766233766234e-05,
"loss": 0.9814,
"step": 289
},
{
"epoch": 0.928,
"grad_norm": 0.22726193070411682,
"learning_rate": 1.5584415584415583e-05,
"loss": 1.1431,
"step": 290
},
{
"epoch": 0.9312,
"grad_norm": 0.18025581538677216,
"learning_rate": 1.4935064935064936e-05,
"loss": 0.9649,
"step": 291
},
{
"epoch": 0.9344,
"grad_norm": 0.21535000205039978,
"learning_rate": 1.4285714285714285e-05,
"loss": 1.0441,
"step": 292
},
{
"epoch": 0.9376,
"grad_norm": 0.20014546811580658,
"learning_rate": 1.3636363636363637e-05,
"loss": 1.0166,
"step": 293
},
{
"epoch": 0.9408,
"grad_norm": 0.22738787531852722,
"learning_rate": 1.2987012987012986e-05,
"loss": 1.0564,
"step": 294
},
{
"epoch": 0.944,
"grad_norm": 0.2020861804485321,
"learning_rate": 1.2337662337662339e-05,
"loss": 1.1241,
"step": 295
},
{
"epoch": 0.9472,
"grad_norm": 0.19888809323310852,
"learning_rate": 1.1688311688311688e-05,
"loss": 1.1114,
"step": 296
},
{
"epoch": 0.9504,
"grad_norm": 0.20912377536296844,
"learning_rate": 1.103896103896104e-05,
"loss": 1.0971,
"step": 297
},
{
"epoch": 0.9536,
"grad_norm": 0.21206621825695038,
"learning_rate": 1.038961038961039e-05,
"loss": 1.0601,
"step": 298
},
{
"epoch": 0.9568,
"grad_norm": 0.18667680025100708,
"learning_rate": 9.740259740259742e-06,
"loss": 1.0291,
"step": 299
},
{
"epoch": 0.96,
"grad_norm": 0.21125559508800507,
"learning_rate": 9.090909090909091e-06,
"loss": 1.0483,
"step": 300
},
{
"epoch": 0.9632,
"grad_norm": 0.21776145696640015,
"learning_rate": 8.441558441558442e-06,
"loss": 0.9912,
"step": 301
},
{
"epoch": 0.9664,
"grad_norm": 0.20144303143024445,
"learning_rate": 7.792207792207792e-06,
"loss": 1.0357,
"step": 302
},
{
"epoch": 0.9696,
"grad_norm": 0.1984029859304428,
"learning_rate": 7.142857142857143e-06,
"loss": 1.0648,
"step": 303
},
{
"epoch": 0.9728,
"grad_norm": 0.17972829937934875,
"learning_rate": 6.493506493506493e-06,
"loss": 1.0033,
"step": 304
},
{
"epoch": 0.976,
"grad_norm": 0.1818286031484604,
"learning_rate": 5.844155844155844e-06,
"loss": 0.997,
"step": 305
},
{
"epoch": 0.9792,
"grad_norm": 0.19670912623405457,
"learning_rate": 5.194805194805195e-06,
"loss": 1.0256,
"step": 306
},
{
"epoch": 0.9824,
"grad_norm": 0.20527283847332,
"learning_rate": 4.5454545454545455e-06,
"loss": 1.0348,
"step": 307
},
{
"epoch": 0.9856,
"grad_norm": 0.19025909900665283,
"learning_rate": 3.896103896103896e-06,
"loss": 1.0682,
"step": 308
},
{
"epoch": 0.9888,
"grad_norm": 0.19544818997383118,
"learning_rate": 3.2467532467532465e-06,
"loss": 0.9872,
"step": 309
},
{
"epoch": 0.992,
"grad_norm": 0.22112183272838593,
"learning_rate": 2.5974025974025976e-06,
"loss": 1.0661,
"step": 310
},
{
"epoch": 0.9952,
"grad_norm": 0.23328153789043427,
"learning_rate": 1.948051948051948e-06,
"loss": 1.0691,
"step": 311
},
{
"epoch": 0.9984,
"grad_norm": 0.20181375741958618,
"learning_rate": 1.2987012987012988e-06,
"loss": 0.9416,
"step": 312
},
{
"epoch": 1.0,
"grad_norm": 0.29312625527381897,
"learning_rate": 6.493506493506494e-07,
"loss": 1.1216,
"step": 313
},
{
"epoch": 0.12202467696492762,
"grad_norm": 0.2231415957212448,
"learning_rate": 0.0,
"loss": 1.0468,
"step": 314
},
{
"epoch": 0.12241329058583503,
"grad_norm": 0.22263288497924805,
"learning_rate": 0.00017594394706111328,
"loss": 1.0399,
"step": 315
},
{
"epoch": 0.12280190420674245,
"grad_norm": 0.22909891605377197,
"learning_rate": 0.00017586609575710393,
"loss": 1.1069,
"step": 316
},
{
"epoch": 0.12319051782764986,
"grad_norm": 0.23951445519924164,
"learning_rate": 0.0001757882444530946,
"loss": 1.1036,
"step": 317
},
{
"epoch": 0.12357913144855727,
"grad_norm": 0.2409268021583557,
"learning_rate": 0.00017571039314908526,
"loss": 1.1114,
"step": 318
},
{
"epoch": 0.12396774506946469,
"grad_norm": 0.23753899335861206,
"learning_rate": 0.00017563254184507592,
"loss": 1.1297,
"step": 319
},
{
"epoch": 0.12435635869037209,
"grad_norm": 0.2823902666568756,
"learning_rate": 0.00017555469054106657,
"loss": 1.1293,
"step": 320
},
{
"epoch": 0.12474497231127951,
"grad_norm": 0.24093545973300934,
"learning_rate": 0.00017547683923705722,
"loss": 1.0678,
"step": 321
},
{
"epoch": 0.12513358593218693,
"grad_norm": 0.22565563023090363,
"learning_rate": 0.0001753989879330479,
"loss": 1.1408,
"step": 322
},
{
"epoch": 0.12552219955309435,
"grad_norm": 0.22569572925567627,
"learning_rate": 0.00017532113662903855,
"loss": 1.0543,
"step": 323
},
{
"epoch": 0.12591081317400174,
"grad_norm": 0.24962866306304932,
"learning_rate": 0.0001752432853250292,
"loss": 1.0818,
"step": 324
},
{
"epoch": 0.12629942679490916,
"grad_norm": 0.22184576094150543,
"learning_rate": 0.00017516543402101986,
"loss": 1.0835,
"step": 325
},
{
"epoch": 0.12668804041581658,
"grad_norm": 0.2572194039821625,
"learning_rate": 0.0001750875827170105,
"loss": 1.0767,
"step": 326
},
{
"epoch": 0.127076654036724,
"grad_norm": 0.24131342768669128,
"learning_rate": 0.00017500973141300116,
"loss": 1.0981,
"step": 327
},
{
"epoch": 0.1274652676576314,
"grad_norm": 0.2386389970779419,
"learning_rate": 0.00017493188010899184,
"loss": 1.0828,
"step": 328
},
{
"epoch": 0.1278538812785388,
"grad_norm": 0.2654125690460205,
"learning_rate": 0.0001748540288049825,
"loss": 1.1266,
"step": 329
},
{
"epoch": 0.12824249489944622,
"grad_norm": 0.2925739884376526,
"learning_rate": 0.00017477617750097314,
"loss": 1.0983,
"step": 330
},
{
"epoch": 0.12863110852035364,
"grad_norm": 0.26589342951774597,
"learning_rate": 0.0001746983261969638,
"loss": 1.1029,
"step": 331
},
{
"epoch": 0.12901972214126106,
"grad_norm": 0.24565957486629486,
"learning_rate": 0.00017462047489295445,
"loss": 1.0975,
"step": 332
},
{
"epoch": 0.12940833576216845,
"grad_norm": 0.2459682673215866,
"learning_rate": 0.00017454262358894513,
"loss": 1.0566,
"step": 333
},
{
"epoch": 0.12979694938307587,
"grad_norm": 0.23349183797836304,
"learning_rate": 0.00017446477228493578,
"loss": 1.0833,
"step": 334
},
{
"epoch": 0.1301855630039833,
"grad_norm": 0.26166337728500366,
"learning_rate": 0.00017438692098092643,
"loss": 1.1598,
"step": 335
},
{
"epoch": 0.1305741766248907,
"grad_norm": 0.24188168346881866,
"learning_rate": 0.00017430906967691708,
"loss": 1.0728,
"step": 336
},
{
"epoch": 0.13096279024579813,
"grad_norm": 0.22922398149967194,
"learning_rate": 0.00017423121837290773,
"loss": 1.0311,
"step": 337
},
{
"epoch": 0.13135140386670552,
"grad_norm": 0.2652754485607147,
"learning_rate": 0.00017415336706889841,
"loss": 1.1096,
"step": 338
},
{
"epoch": 0.13174001748761294,
"grad_norm": 0.2355881780385971,
"learning_rate": 0.00017407551576488907,
"loss": 1.0964,
"step": 339
},
{
"epoch": 0.13212863110852036,
"grad_norm": 0.244523823261261,
"learning_rate": 0.00017399766446087972,
"loss": 1.142,
"step": 340
},
{
"epoch": 0.13251724472942777,
"grad_norm": 0.24705976247787476,
"learning_rate": 0.00017391981315687037,
"loss": 1.0943,
"step": 341
},
{
"epoch": 0.13290585835033517,
"grad_norm": 0.22817552089691162,
"learning_rate": 0.00017384196185286102,
"loss": 1.0621,
"step": 342
},
{
"epoch": 0.13329447197124258,
"grad_norm": 0.22605225443840027,
"learning_rate": 0.0001737641105488517,
"loss": 1.0714,
"step": 343
},
{
"epoch": 0.13368308559215,
"grad_norm": 0.2584545314311981,
"learning_rate": 0.00017368625924484235,
"loss": 1.1367,
"step": 344
},
{
"epoch": 0.13407169921305742,
"grad_norm": 0.2248220443725586,
"learning_rate": 0.000173608407940833,
"loss": 1.0872,
"step": 345
},
{
"epoch": 0.13446031283396484,
"grad_norm": 0.2141868770122528,
"learning_rate": 0.00017353055663682368,
"loss": 1.0572,
"step": 346
},
{
"epoch": 0.13484892645487223,
"grad_norm": 0.2615523934364319,
"learning_rate": 0.00017345270533281434,
"loss": 1.1048,
"step": 347
},
{
"epoch": 0.13523754007577965,
"grad_norm": 0.22990448772907257,
"learning_rate": 0.000173374854028805,
"loss": 1.0528,
"step": 348
},
{
"epoch": 0.13562615369668707,
"grad_norm": 0.2132262885570526,
"learning_rate": 0.00017329700272479564,
"loss": 1.0476,
"step": 349
},
{
"epoch": 0.1360147673175945,
"grad_norm": 0.2578272819519043,
"learning_rate": 0.00017321915142078632,
"loss": 1.0852,
"step": 350
},
{
"epoch": 0.1364033809385019,
"grad_norm": 0.22881457209587097,
"learning_rate": 0.00017314130011677697,
"loss": 1.1017,
"step": 351
},
{
"epoch": 0.1367919945594093,
"grad_norm": 0.21067696809768677,
"learning_rate": 0.00017306344881276762,
"loss": 1.0444,
"step": 352
},
{
"epoch": 0.13718060818031672,
"grad_norm": 0.2304215282201767,
"learning_rate": 0.0001729855975087583,
"loss": 1.0737,
"step": 353
},
{
"epoch": 0.13756922180122413,
"grad_norm": 0.2031925916671753,
"learning_rate": 0.00017290774620474895,
"loss": 1.0036,
"step": 354
},
{
"epoch": 0.13795783542213155,
"grad_norm": 0.27281051874160767,
"learning_rate": 0.0001728298949007396,
"loss": 1.148,
"step": 355
},
{
"epoch": 0.13834644904303897,
"grad_norm": 0.204191654920578,
"learning_rate": 0.00017275204359673026,
"loss": 0.9607,
"step": 356
},
{
"epoch": 0.13873506266394636,
"grad_norm": 0.221976637840271,
"learning_rate": 0.0001726741922927209,
"loss": 1.1068,
"step": 357
},
{
"epoch": 0.13912367628485378,
"grad_norm": 0.20831729471683502,
"learning_rate": 0.0001725963409887116,
"loss": 1.034,
"step": 358
},
{
"epoch": 0.1395122899057612,
"grad_norm": 0.21639779210090637,
"learning_rate": 0.00017251848968470224,
"loss": 1.0613,
"step": 359
},
{
"epoch": 0.13990090352666862,
"grad_norm": 0.1959424465894699,
"learning_rate": 0.0001724406383806929,
"loss": 1.0506,
"step": 360
},
{
"epoch": 0.140289517147576,
"grad_norm": 0.2044398933649063,
"learning_rate": 0.00017236278707668355,
"loss": 1.0316,
"step": 361
},
{
"epoch": 0.14067813076848343,
"grad_norm": 0.21483004093170166,
"learning_rate": 0.0001722849357726742,
"loss": 1.0361,
"step": 362
},
{
"epoch": 0.14106674438939085,
"grad_norm": 0.237701416015625,
"learning_rate": 0.00017220708446866485,
"loss": 1.1264,
"step": 363
},
{
"epoch": 0.14145535801029827,
"grad_norm": 0.20750795304775238,
"learning_rate": 0.00017212923316465553,
"loss": 1.0523,
"step": 364
},
{
"epoch": 0.14184397163120568,
"grad_norm": 0.2252965271472931,
"learning_rate": 0.00017205138186064618,
"loss": 1.0764,
"step": 365
},
{
"epoch": 0.14223258525211308,
"grad_norm": 0.2033565789461136,
"learning_rate": 0.00017197353055663683,
"loss": 1.064,
"step": 366
},
{
"epoch": 0.1426211988730205,
"grad_norm": 0.21123190224170685,
"learning_rate": 0.00017189567925262749,
"loss": 1.0515,
"step": 367
},
{
"epoch": 0.1430098124939279,
"grad_norm": 0.20646221935749054,
"learning_rate": 0.00017181782794861814,
"loss": 1.0617,
"step": 368
},
{
"epoch": 0.14339842611483533,
"grad_norm": 0.2079589068889618,
"learning_rate": 0.00017173997664460882,
"loss": 1.0569,
"step": 369
},
{
"epoch": 0.14378703973574275,
"grad_norm": 0.216246098279953,
"learning_rate": 0.00017166212534059947,
"loss": 1.0986,
"step": 370
},
{
"epoch": 0.14417565335665014,
"grad_norm": 0.20711806416511536,
"learning_rate": 0.00017158427403659012,
"loss": 1.1342,
"step": 371
},
{
"epoch": 0.14456426697755756,
"grad_norm": 0.235435351729393,
"learning_rate": 0.00017150642273258077,
"loss": 1.1082,
"step": 372
},
{
"epoch": 0.14495288059846498,
"grad_norm": 0.2273191511631012,
"learning_rate": 0.00017142857142857143,
"loss": 1.1064,
"step": 373
},
{
"epoch": 0.1453414942193724,
"grad_norm": 0.2075672745704651,
"learning_rate": 0.0001713507201245621,
"loss": 1.0536,
"step": 374
},
{
"epoch": 0.14573010784027982,
"grad_norm": 0.20764274895191193,
"learning_rate": 0.00017127286882055276,
"loss": 1.0673,
"step": 375
},
{
"epoch": 0.1461187214611872,
"grad_norm": 0.2441243678331375,
"learning_rate": 0.0001711950175165434,
"loss": 1.1271,
"step": 376
},
{
"epoch": 0.14650733508209463,
"grad_norm": 0.2383374124765396,
"learning_rate": 0.00017111716621253406,
"loss": 1.083,
"step": 377
},
{
"epoch": 0.14689594870300204,
"grad_norm": 0.2172410786151886,
"learning_rate": 0.0001710393149085247,
"loss": 1.0605,
"step": 378
},
{
"epoch": 0.14728456232390946,
"grad_norm": 0.22591541707515717,
"learning_rate": 0.0001709614636045154,
"loss": 1.0931,
"step": 379
},
{
"epoch": 0.14767317594481685,
"grad_norm": 0.23099495470523834,
"learning_rate": 0.00017088361230050604,
"loss": 1.1021,
"step": 380
},
{
"epoch": 0.14806178956572427,
"grad_norm": 0.21461094915866852,
"learning_rate": 0.0001708057609964967,
"loss": 1.0959,
"step": 381
},
{
"epoch": 0.1484504031866317,
"grad_norm": 0.21557241678237915,
"learning_rate": 0.00017072790969248735,
"loss": 1.0155,
"step": 382
},
{
"epoch": 0.1488390168075391,
"grad_norm": 0.234396293759346,
"learning_rate": 0.000170650058388478,
"loss": 1.1289,
"step": 383
},
{
"epoch": 0.14922763042844653,
"grad_norm": 0.22895503044128418,
"learning_rate": 0.00017057220708446868,
"loss": 0.9919,
"step": 384
},
{
"epoch": 0.14961624404935392,
"grad_norm": 0.2054683268070221,
"learning_rate": 0.00017049435578045933,
"loss": 1.0607,
"step": 385
},
{
"epoch": 0.15000485767026134,
"grad_norm": 0.25569215416908264,
"learning_rate": 0.00017041650447644998,
"loss": 1.0517,
"step": 386
},
{
"epoch": 0.15039347129116876,
"grad_norm": 0.2222641259431839,
"learning_rate": 0.00017033865317244064,
"loss": 1.0404,
"step": 387
},
{
"epoch": 0.15078208491207618,
"grad_norm": 0.20501169562339783,
"learning_rate": 0.0001702608018684313,
"loss": 0.9897,
"step": 388
},
{
"epoch": 0.1511706985329836,
"grad_norm": 0.22080403566360474,
"learning_rate": 0.00017018295056442197,
"loss": 1.1013,
"step": 389
},
{
"epoch": 0.15155931215389098,
"grad_norm": 0.21218529343605042,
"learning_rate": 0.00017010509926041262,
"loss": 1.0541,
"step": 390
},
{
"epoch": 0.1519479257747984,
"grad_norm": 0.23064807057380676,
"learning_rate": 0.00017002724795640327,
"loss": 1.037,
"step": 391
},
{
"epoch": 0.15233653939570582,
"grad_norm": 0.21164493262767792,
"learning_rate": 0.00016994939665239392,
"loss": 1.0769,
"step": 392
},
{
"epoch": 0.15272515301661324,
"grad_norm": 0.22565549612045288,
"learning_rate": 0.00016987154534838457,
"loss": 1.0638,
"step": 393
},
{
"epoch": 0.15311376663752063,
"grad_norm": 0.22492647171020508,
"learning_rate": 0.00016979369404437525,
"loss": 1.063,
"step": 394
},
{
"epoch": 0.15350238025842805,
"grad_norm": 0.22335395216941833,
"learning_rate": 0.0001697158427403659,
"loss": 1.1032,
"step": 395
},
{
"epoch": 0.15389099387933547,
"grad_norm": 0.2164154201745987,
"learning_rate": 0.00016963799143635656,
"loss": 1.1275,
"step": 396
},
{
"epoch": 0.1542796075002429,
"grad_norm": 0.22547736763954163,
"learning_rate": 0.0001695601401323472,
"loss": 1.1324,
"step": 397
},
{
"epoch": 0.1546682211211503,
"grad_norm": 0.2028045952320099,
"learning_rate": 0.0001694822888283379,
"loss": 1.0057,
"step": 398
},
{
"epoch": 0.1550568347420577,
"grad_norm": 0.20770573616027832,
"learning_rate": 0.00016940443752432854,
"loss": 1.0311,
"step": 399
},
{
"epoch": 0.15544544836296512,
"grad_norm": 0.2231476902961731,
"learning_rate": 0.0001693265862203192,
"loss": 1.0535,
"step": 400
},
{
"epoch": 0.15583406198387253,
"grad_norm": 0.21618099510669708,
"learning_rate": 0.00016924873491630987,
"loss": 1.0616,
"step": 401
},
{
"epoch": 0.15622267560477995,
"grad_norm": 0.24024419486522675,
"learning_rate": 0.00016917088361230052,
"loss": 1.1324,
"step": 402
},
{
"epoch": 0.15661128922568737,
"grad_norm": 0.2002171128988266,
"learning_rate": 0.00016909303230829118,
"loss": 1.015,
"step": 403
},
{
"epoch": 0.15699990284659476,
"grad_norm": 0.21771477162837982,
"learning_rate": 0.00016901518100428183,
"loss": 1.0817,
"step": 404
},
{
"epoch": 0.15738851646750218,
"grad_norm": 0.22052259743213654,
"learning_rate": 0.0001689373297002725,
"loss": 1.0836,
"step": 405
},
{
"epoch": 0.1577771300884096,
"grad_norm": 0.1964062750339508,
"learning_rate": 0.00016885947839626316,
"loss": 1.0505,
"step": 406
},
{
"epoch": 0.15816574370931702,
"grad_norm": 0.22714298963546753,
"learning_rate": 0.0001687816270922538,
"loss": 1.0702,
"step": 407
},
{
"epoch": 0.15855435733022444,
"grad_norm": 0.20647728443145752,
"learning_rate": 0.00016870377578824446,
"loss": 1.0349,
"step": 408
},
{
"epoch": 0.15894297095113183,
"grad_norm": 0.2355160117149353,
"learning_rate": 0.00016862592448423512,
"loss": 1.0305,
"step": 409
},
{
"epoch": 0.15933158457203925,
"grad_norm": 0.22890770435333252,
"learning_rate": 0.0001685480731802258,
"loss": 1.0854,
"step": 410
},
{
"epoch": 0.15972019819294667,
"grad_norm": 0.21947838366031647,
"learning_rate": 0.00016847022187621645,
"loss": 1.0948,
"step": 411
},
{
"epoch": 0.16010881181385409,
"grad_norm": 0.22334899008274078,
"learning_rate": 0.0001683923705722071,
"loss": 1.006,
"step": 412
},
{
"epoch": 0.16049742543476148,
"grad_norm": 0.22324936091899872,
"learning_rate": 0.00016831451926819775,
"loss": 1.0402,
"step": 413
},
{
"epoch": 0.1608860390556689,
"grad_norm": 0.21462097764015198,
"learning_rate": 0.0001682366679641884,
"loss": 1.077,
"step": 414
},
{
"epoch": 0.1612746526765763,
"grad_norm": 0.24567006528377533,
"learning_rate": 0.00016815881666017908,
"loss": 1.15,
"step": 415
},
{
"epoch": 0.16166326629748373,
"grad_norm": 0.26437243819236755,
"learning_rate": 0.00016808096535616973,
"loss": 1.1251,
"step": 416
},
{
"epoch": 0.16205187991839115,
"grad_norm": 0.2217959761619568,
"learning_rate": 0.00016800311405216039,
"loss": 1.1103,
"step": 417
},
{
"epoch": 0.16244049353929854,
"grad_norm": 0.24402475357055664,
"learning_rate": 0.00016792526274815104,
"loss": 1.0672,
"step": 418
},
{
"epoch": 0.16282910716020596,
"grad_norm": 0.21609526872634888,
"learning_rate": 0.0001678474114441417,
"loss": 1.0291,
"step": 419
},
{
"epoch": 0.16321772078111338,
"grad_norm": 0.20054642856121063,
"learning_rate": 0.00016776956014013237,
"loss": 1.0704,
"step": 420
},
{
"epoch": 0.1636063344020208,
"grad_norm": 0.22864869236946106,
"learning_rate": 0.00016769170883612302,
"loss": 1.0612,
"step": 421
},
{
"epoch": 0.16399494802292822,
"grad_norm": 0.22651974856853485,
"learning_rate": 0.00016761385753211367,
"loss": 1.0749,
"step": 422
},
{
"epoch": 0.1643835616438356,
"grad_norm": 0.21587328612804413,
"learning_rate": 0.00016753600622810433,
"loss": 1.0398,
"step": 423
},
{
"epoch": 0.16477217526474303,
"grad_norm": 0.1953774094581604,
"learning_rate": 0.00016745815492409498,
"loss": 1.0275,
"step": 424
},
{
"epoch": 0.16516078888565044,
"grad_norm": 0.21803410351276398,
"learning_rate": 0.00016738030362008566,
"loss": 1.1219,
"step": 425
},
{
"epoch": 0.16554940250655786,
"grad_norm": 0.2034682035446167,
"learning_rate": 0.0001673024523160763,
"loss": 1.0342,
"step": 426
},
{
"epoch": 0.16593801612746525,
"grad_norm": 0.20135951042175293,
"learning_rate": 0.00016722460101206696,
"loss": 0.9802,
"step": 427
},
{
"epoch": 0.16632662974837267,
"grad_norm": 0.23310376703739166,
"learning_rate": 0.0001671467497080576,
"loss": 1.0789,
"step": 428
},
{
"epoch": 0.1667152433692801,
"grad_norm": 0.21475404500961304,
"learning_rate": 0.00016706889840404827,
"loss": 1.0416,
"step": 429
},
{
"epoch": 0.1671038569901875,
"grad_norm": 0.21661072969436646,
"learning_rate": 0.00016699104710003894,
"loss": 1.0568,
"step": 430
},
{
"epoch": 0.16749247061109493,
"grad_norm": 0.20310629904270172,
"learning_rate": 0.0001669131957960296,
"loss": 0.9968,
"step": 431
},
{
"epoch": 0.16788108423200232,
"grad_norm": 0.2596947252750397,
"learning_rate": 0.00016683534449202025,
"loss": 1.0478,
"step": 432
},
{
"epoch": 0.16826969785290974,
"grad_norm": 0.22226987779140472,
"learning_rate": 0.0001667574931880109,
"loss": 1.0898,
"step": 433
},
{
"epoch": 0.16865831147381716,
"grad_norm": 0.22499911487102509,
"learning_rate": 0.00016667964188400155,
"loss": 1.07,
"step": 434
},
{
"epoch": 0.16904692509472458,
"grad_norm": 0.2717292308807373,
"learning_rate": 0.0001666017905799922,
"loss": 1.0562,
"step": 435
},
{
"epoch": 0.169435538715632,
"grad_norm": 0.22052323818206787,
"learning_rate": 0.00016652393927598288,
"loss": 1.0732,
"step": 436
},
{
"epoch": 0.16982415233653939,
"grad_norm": 0.21741728484630585,
"learning_rate": 0.00016644608797197354,
"loss": 1.0409,
"step": 437
},
{
"epoch": 0.1702127659574468,
"grad_norm": 0.20701193809509277,
"learning_rate": 0.0001663682366679642,
"loss": 1.0731,
"step": 438
},
{
"epoch": 0.17060137957835422,
"grad_norm": 0.22071130573749542,
"learning_rate": 0.00016629038536395484,
"loss": 1.0992,
"step": 439
},
{
"epoch": 0.17098999319926164,
"grad_norm": 0.20261412858963013,
"learning_rate": 0.0001662125340599455,
"loss": 1.0051,
"step": 440
},
{
"epoch": 0.17137860682016906,
"grad_norm": 0.2082947939634323,
"learning_rate": 0.00016613468275593617,
"loss": 1.0477,
"step": 441
},
{
"epoch": 0.17176722044107645,
"grad_norm": 0.22534717619419098,
"learning_rate": 0.00016605683145192682,
"loss": 1.041,
"step": 442
},
{
"epoch": 0.17215583406198387,
"grad_norm": 0.21547731757164001,
"learning_rate": 0.00016597898014791748,
"loss": 1.0528,
"step": 443
},
{
"epoch": 0.1725444476828913,
"grad_norm": 0.24141089618206024,
"learning_rate": 0.00016590112884390813,
"loss": 1.0928,
"step": 444
},
{
"epoch": 0.1729330613037987,
"grad_norm": 0.21910884976387024,
"learning_rate": 0.00016582327753989878,
"loss": 1.063,
"step": 445
},
{
"epoch": 0.1733216749247061,
"grad_norm": 0.21782316267490387,
"learning_rate": 0.00016574542623588946,
"loss": 1.0976,
"step": 446
},
{
"epoch": 0.17371028854561352,
"grad_norm": 0.21771778166294098,
"learning_rate": 0.0001656675749318801,
"loss": 1.0677,
"step": 447
},
{
"epoch": 0.17409890216652094,
"grad_norm": 0.22117659449577332,
"learning_rate": 0.00016558972362787076,
"loss": 1.0669,
"step": 448
},
{
"epoch": 0.17448751578742835,
"grad_norm": 0.21918092668056488,
"learning_rate": 0.00016551187232386141,
"loss": 1.0955,
"step": 449
},
{
"epoch": 0.17487612940833577,
"grad_norm": 0.22027818858623505,
"learning_rate": 0.0001654340210198521,
"loss": 1.0201,
"step": 450
},
{
"epoch": 0.17526474302924316,
"grad_norm": 0.2042885720729828,
"learning_rate": 0.00016535616971584275,
"loss": 1.0881,
"step": 451
},
{
"epoch": 0.17565335665015058,
"grad_norm": 0.21788261830806732,
"learning_rate": 0.0001652783184118334,
"loss": 1.0918,
"step": 452
},
{
"epoch": 0.176041970271058,
"grad_norm": 0.23332571983337402,
"learning_rate": 0.00016520046710782408,
"loss": 1.091,
"step": 453
},
{
"epoch": 0.17643058389196542,
"grad_norm": 0.20204192399978638,
"learning_rate": 0.00016512261580381473,
"loss": 1.0366,
"step": 454
},
{
"epoch": 0.17681919751287284,
"grad_norm": 0.21761906147003174,
"learning_rate": 0.00016504476449980538,
"loss": 1.0131,
"step": 455
},
{
"epoch": 0.17720781113378023,
"grad_norm": 0.2152051478624344,
"learning_rate": 0.00016496691319579606,
"loss": 1.0868,
"step": 456
},
{
"epoch": 0.17759642475468765,
"grad_norm": 0.22776494920253754,
"learning_rate": 0.0001648890618917867,
"loss": 1.0807,
"step": 457
},
{
"epoch": 0.17798503837559507,
"grad_norm": 0.2171342968940735,
"learning_rate": 0.00016481121058777736,
"loss": 1.0537,
"step": 458
},
{
"epoch": 0.17837365199650249,
"grad_norm": 0.2046273946762085,
"learning_rate": 0.00016473335928376802,
"loss": 1.0097,
"step": 459
},
{
"epoch": 0.17876226561740988,
"grad_norm": 0.2047681361436844,
"learning_rate": 0.00016465550797975867,
"loss": 1.0204,
"step": 460
},
{
"epoch": 0.1791508792383173,
"grad_norm": 0.1876862645149231,
"learning_rate": 0.00016457765667574935,
"loss": 0.9383,
"step": 461
},
{
"epoch": 0.17953949285922471,
"grad_norm": 0.218430757522583,
"learning_rate": 0.00016449980537174,
"loss": 1.0721,
"step": 462
},
{
"epoch": 0.17992810648013213,
"grad_norm": 0.2245480865240097,
"learning_rate": 0.00016442195406773065,
"loss": 1.0859,
"step": 463
},
{
"epoch": 0.18031672010103955,
"grad_norm": 0.22577151656150818,
"learning_rate": 0.0001643441027637213,
"loss": 1.0825,
"step": 464
},
{
"epoch": 0.18070533372194694,
"grad_norm": 0.20132745802402496,
"learning_rate": 0.00016426625145971196,
"loss": 1.0615,
"step": 465
},
{
"epoch": 0.18109394734285436,
"grad_norm": 0.2277505248785019,
"learning_rate": 0.00016418840015570263,
"loss": 1.0426,
"step": 466
},
{
"epoch": 0.18148256096376178,
"grad_norm": 0.22540105879306793,
"learning_rate": 0.0001641105488516933,
"loss": 1.0481,
"step": 467
},
{
"epoch": 0.1818711745846692,
"grad_norm": 0.20358088612556458,
"learning_rate": 0.00016403269754768394,
"loss": 1.0286,
"step": 468
},
{
"epoch": 0.18225978820557662,
"grad_norm": 0.22534145414829254,
"learning_rate": 0.0001639548462436746,
"loss": 1.1183,
"step": 469
},
{
"epoch": 0.182648401826484,
"grad_norm": 0.2188873142004013,
"learning_rate": 0.00016387699493966524,
"loss": 1.0439,
"step": 470
},
{
"epoch": 0.18303701544739143,
"grad_norm": 0.2128048539161682,
"learning_rate": 0.00016379914363565592,
"loss": 1.027,
"step": 471
},
{
"epoch": 0.18342562906829885,
"grad_norm": 0.2518141567707062,
"learning_rate": 0.00016372129233164657,
"loss": 1.0468,
"step": 472
},
{
"epoch": 0.18381424268920626,
"grad_norm": 0.2189142256975174,
"learning_rate": 0.00016364344102763723,
"loss": 1.0581,
"step": 473
},
{
"epoch": 0.18420285631011368,
"grad_norm": 0.31266725063323975,
"learning_rate": 0.00016356558972362788,
"loss": 1.0554,
"step": 474
},
{
"epoch": 0.18459146993102107,
"grad_norm": 0.21343916654586792,
"learning_rate": 0.00016348773841961853,
"loss": 1.0795,
"step": 475
},
{
"epoch": 0.1849800835519285,
"grad_norm": 0.22907280921936035,
"learning_rate": 0.00016340988711560918,
"loss": 1.0304,
"step": 476
},
{
"epoch": 0.1853686971728359,
"grad_norm": 0.2105257511138916,
"learning_rate": 0.00016333203581159986,
"loss": 1.0231,
"step": 477
},
{
"epoch": 0.18575731079374333,
"grad_norm": 0.19537831842899323,
"learning_rate": 0.00016325418450759051,
"loss": 1.0103,
"step": 478
},
{
"epoch": 0.18614592441465072,
"grad_norm": 0.20522372424602509,
"learning_rate": 0.00016317633320358117,
"loss": 1.0196,
"step": 479
},
{
"epoch": 0.18653453803555814,
"grad_norm": 0.21646477282047272,
"learning_rate": 0.00016309848189957182,
"loss": 1.0579,
"step": 480
},
{
"epoch": 0.18692315165646556,
"grad_norm": 0.21077193319797516,
"learning_rate": 0.00016302063059556247,
"loss": 1.0638,
"step": 481
},
{
"epoch": 0.18731176527737298,
"grad_norm": 0.20357473194599152,
"learning_rate": 0.00016294277929155315,
"loss": 1.0635,
"step": 482
},
{
"epoch": 0.1877003788982804,
"grad_norm": 0.2188001275062561,
"learning_rate": 0.0001628649279875438,
"loss": 1.0267,
"step": 483
},
{
"epoch": 0.1880889925191878,
"grad_norm": 0.2128928154706955,
"learning_rate": 0.00016278707668353445,
"loss": 0.9706,
"step": 484
},
{
"epoch": 0.1884776061400952,
"grad_norm": 0.22081372141838074,
"learning_rate": 0.0001627092253795251,
"loss": 1.08,
"step": 485
},
{
"epoch": 0.18886621976100262,
"grad_norm": 0.2250615805387497,
"learning_rate": 0.00016263137407551576,
"loss": 1.1451,
"step": 486
},
{
"epoch": 0.18925483338191004,
"grad_norm": 0.1984967589378357,
"learning_rate": 0.00016255352277150644,
"loss": 1.0744,
"step": 487
},
{
"epoch": 0.18964344700281746,
"grad_norm": 0.20778900384902954,
"learning_rate": 0.0001624756714674971,
"loss": 1.0623,
"step": 488
},
{
"epoch": 0.19003206062372485,
"grad_norm": 0.2026563137769699,
"learning_rate": 0.00016239782016348774,
"loss": 1.0714,
"step": 489
},
{
"epoch": 0.19042067424463227,
"grad_norm": 0.21598374843597412,
"learning_rate": 0.0001623199688594784,
"loss": 1.0869,
"step": 490
},
{
"epoch": 0.1908092878655397,
"grad_norm": 0.18944978713989258,
"learning_rate": 0.00016224211755546904,
"loss": 1.055,
"step": 491
},
{
"epoch": 0.1911979014864471,
"grad_norm": 0.20698946714401245,
"learning_rate": 0.00016216426625145972,
"loss": 1.0392,
"step": 492
},
{
"epoch": 0.1915865151073545,
"grad_norm": 0.22395353019237518,
"learning_rate": 0.00016208641494745038,
"loss": 1.0681,
"step": 493
},
{
"epoch": 0.19197512872826192,
"grad_norm": 0.22372962534427643,
"learning_rate": 0.00016200856364344103,
"loss": 1.0767,
"step": 494
},
{
"epoch": 0.19236374234916934,
"grad_norm": 0.2066701054573059,
"learning_rate": 0.00016193071233943168,
"loss": 1.0061,
"step": 495
},
{
"epoch": 0.19275235597007676,
"grad_norm": 0.19716408848762512,
"learning_rate": 0.00016185286103542233,
"loss": 1.039,
"step": 496
},
{
"epoch": 0.19314096959098417,
"grad_norm": 0.22159601747989655,
"learning_rate": 0.000161775009731413,
"loss": 1.0832,
"step": 497
},
{
"epoch": 0.19352958321189156,
"grad_norm": 0.21509626507759094,
"learning_rate": 0.00016169715842740366,
"loss": 1.0264,
"step": 498
},
{
"epoch": 0.19391819683279898,
"grad_norm": 0.21598199009895325,
"learning_rate": 0.00016161930712339431,
"loss": 1.049,
"step": 499
},
{
"epoch": 0.1943068104537064,
"grad_norm": 0.20279590785503387,
"learning_rate": 0.00016154145581938497,
"loss": 1.0505,
"step": 500
}
],
"logging_steps": 1,
"max_steps": 2574,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.3571235778270986e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}