{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9967637540453075,
"eval_steps": 500,
"global_step": 154,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006472491909385114,
"grad_norm": 5.306360721588135,
"learning_rate": 0.0,
"loss": 1.1501,
"num_tokens": 963559.0,
"step": 1
},
{
"epoch": 0.012944983818770227,
"grad_norm": 5.30866003036499,
"learning_rate": 6.25e-08,
"loss": 1.1455,
"num_tokens": 1927760.0,
"step": 2
},
{
"epoch": 0.019417475728155338,
"grad_norm": 5.259037971496582,
"learning_rate": 1.25e-07,
"loss": 1.1393,
"num_tokens": 2896369.0,
"step": 3
},
{
"epoch": 0.025889967637540454,
"grad_norm": 5.178048610687256,
"learning_rate": 1.875e-07,
"loss": 1.133,
"num_tokens": 3886583.0,
"step": 4
},
{
"epoch": 0.032362459546925564,
"grad_norm": 5.211574554443359,
"learning_rate": 2.5e-07,
"loss": 1.1368,
"num_tokens": 4864476.0,
"step": 5
},
{
"epoch": 0.038834951456310676,
"grad_norm": 5.201828479766846,
"learning_rate": 3.1249999999999997e-07,
"loss": 1.1455,
"num_tokens": 5873295.0,
"step": 6
},
{
"epoch": 0.045307443365695796,
"grad_norm": 5.131554126739502,
"learning_rate": 3.75e-07,
"loss": 1.1259,
"num_tokens": 6876484.0,
"step": 7
},
{
"epoch": 0.05177993527508091,
"grad_norm": 5.083103179931641,
"learning_rate": 4.375e-07,
"loss": 1.1167,
"num_tokens": 7871664.0,
"step": 8
},
{
"epoch": 0.05825242718446602,
"grad_norm": 5.106949806213379,
"learning_rate": 5e-07,
"loss": 1.1324,
"num_tokens": 8869931.0,
"step": 9
},
{
"epoch": 0.06472491909385113,
"grad_norm": 4.861895561218262,
"learning_rate": 5.625e-07,
"loss": 1.1082,
"num_tokens": 9874687.0,
"step": 10
},
{
"epoch": 0.07119741100323625,
"grad_norm": 4.900995254516602,
"learning_rate": 6.249999999999999e-07,
"loss": 1.1035,
"num_tokens": 10842628.0,
"step": 11
},
{
"epoch": 0.07766990291262135,
"grad_norm": 4.93906307220459,
"learning_rate": 6.875e-07,
"loss": 1.1207,
"num_tokens": 11815438.0,
"step": 12
},
{
"epoch": 0.08414239482200647,
"grad_norm": 4.3877153396606445,
"learning_rate": 7.5e-07,
"loss": 1.0708,
"num_tokens": 12788222.0,
"step": 13
},
{
"epoch": 0.09061488673139159,
"grad_norm": 4.357248306274414,
"learning_rate": 8.125e-07,
"loss": 1.0679,
"num_tokens": 13756270.0,
"step": 14
},
{
"epoch": 0.0970873786407767,
"grad_norm": 4.3989644050598145,
"learning_rate": 8.75e-07,
"loss": 1.068,
"num_tokens": 14689813.0,
"step": 15
},
{
"epoch": 0.10355987055016182,
"grad_norm": 4.206644535064697,
"learning_rate": 9.374999999999999e-07,
"loss": 1.0545,
"num_tokens": 15670657.0,
"step": 16
},
{
"epoch": 0.11003236245954692,
"grad_norm": 4.120600700378418,
"learning_rate": 1e-06,
"loss": 1.0357,
"num_tokens": 16640491.0,
"step": 17
},
{
"epoch": 0.11650485436893204,
"grad_norm": 3.066406011581421,
"learning_rate": 1e-06,
"loss": 0.9571,
"num_tokens": 17611388.0,
"step": 18
},
{
"epoch": 0.12297734627831715,
"grad_norm": 3.0390701293945312,
"learning_rate": 1e-06,
"loss": 0.9557,
"num_tokens": 18582653.0,
"step": 19
},
{
"epoch": 0.12944983818770225,
"grad_norm": 2.996507406234741,
"learning_rate": 1e-06,
"loss": 0.9467,
"num_tokens": 19557459.0,
"step": 20
},
{
"epoch": 0.13592233009708737,
"grad_norm": 2.852745771408081,
"learning_rate": 1e-06,
"loss": 0.9414,
"num_tokens": 20524158.0,
"step": 21
},
{
"epoch": 0.1423948220064725,
"grad_norm": 2.7312402725219727,
"learning_rate": 1e-06,
"loss": 0.9161,
"num_tokens": 21502674.0,
"step": 22
},
{
"epoch": 0.1488673139158576,
"grad_norm": 2.712592124938965,
"learning_rate": 1e-06,
"loss": 0.9201,
"num_tokens": 22446294.0,
"step": 23
},
{
"epoch": 0.1553398058252427,
"grad_norm": 2.51646089553833,
"learning_rate": 1e-06,
"loss": 0.8971,
"num_tokens": 23430225.0,
"step": 24
},
{
"epoch": 0.16181229773462782,
"grad_norm": 1.9695550203323364,
"learning_rate": 1e-06,
"loss": 0.8188,
"num_tokens": 24380419.0,
"step": 25
},
{
"epoch": 0.16828478964401294,
"grad_norm": 1.7356574535369873,
"learning_rate": 1e-06,
"loss": 0.7798,
"num_tokens": 25343499.0,
"step": 26
},
{
"epoch": 0.17475728155339806,
"grad_norm": 1.5638344287872314,
"learning_rate": 1e-06,
"loss": 0.7747,
"num_tokens": 26315921.0,
"step": 27
},
{
"epoch": 0.18122977346278318,
"grad_norm": 1.524434208869934,
"learning_rate": 1e-06,
"loss": 0.7787,
"num_tokens": 27255686.0,
"step": 28
},
{
"epoch": 0.18770226537216828,
"grad_norm": 1.3938937187194824,
"learning_rate": 1e-06,
"loss": 0.7647,
"num_tokens": 28232449.0,
"step": 29
},
{
"epoch": 0.1941747572815534,
"grad_norm": 1.3155360221862793,
"learning_rate": 1e-06,
"loss": 0.7544,
"num_tokens": 29213099.0,
"step": 30
},
{
"epoch": 0.20064724919093851,
"grad_norm": 1.1851674318313599,
"learning_rate": 1e-06,
"loss": 0.751,
"num_tokens": 30210546.0,
"step": 31
},
{
"epoch": 0.20711974110032363,
"grad_norm": 1.0744044780731201,
"learning_rate": 1e-06,
"loss": 0.7319,
"num_tokens": 31190215.0,
"step": 32
},
{
"epoch": 0.21359223300970873,
"grad_norm": 1.027134895324707,
"learning_rate": 1e-06,
"loss": 0.7286,
"num_tokens": 32152368.0,
"step": 33
},
{
"epoch": 0.22006472491909385,
"grad_norm": 0.9877621531486511,
"learning_rate": 1e-06,
"loss": 0.7382,
"num_tokens": 33138410.0,
"step": 34
},
{
"epoch": 0.22653721682847897,
"grad_norm": 0.9485065937042236,
"learning_rate": 1e-06,
"loss": 0.7276,
"num_tokens": 34103559.0,
"step": 35
},
{
"epoch": 0.23300970873786409,
"grad_norm": 0.8876718282699585,
"learning_rate": 1e-06,
"loss": 0.7243,
"num_tokens": 35099702.0,
"step": 36
},
{
"epoch": 0.23948220064724918,
"grad_norm": 0.781726598739624,
"learning_rate": 1e-06,
"loss": 0.715,
"num_tokens": 36067246.0,
"step": 37
},
{
"epoch": 0.2459546925566343,
"grad_norm": 0.6435908675193787,
"learning_rate": 1e-06,
"loss": 0.6923,
"num_tokens": 37074157.0,
"step": 38
},
{
"epoch": 0.2524271844660194,
"grad_norm": 0.5644338130950928,
"learning_rate": 1e-06,
"loss": 0.6791,
"num_tokens": 38026864.0,
"step": 39
},
{
"epoch": 0.2588996763754045,
"grad_norm": 0.5346189141273499,
"learning_rate": 1e-06,
"loss": 0.6658,
"num_tokens": 39007950.0,
"step": 40
},
{
"epoch": 0.26537216828478966,
"grad_norm": 0.536400556564331,
"learning_rate": 1e-06,
"loss": 0.673,
"num_tokens": 40004404.0,
"step": 41
},
{
"epoch": 0.27184466019417475,
"grad_norm": 0.5293827652931213,
"learning_rate": 1e-06,
"loss": 0.6625,
"num_tokens": 40979367.0,
"step": 42
},
{
"epoch": 0.2783171521035599,
"grad_norm": 0.5181026458740234,
"learning_rate": 1e-06,
"loss": 0.6512,
"num_tokens": 41954716.0,
"step": 43
},
{
"epoch": 0.284789644012945,
"grad_norm": 0.4992174506187439,
"learning_rate": 1e-06,
"loss": 0.6621,
"num_tokens": 42927994.0,
"step": 44
},
{
"epoch": 0.2912621359223301,
"grad_norm": 0.47395315766334534,
"learning_rate": 1e-06,
"loss": 0.6478,
"num_tokens": 43870458.0,
"step": 45
},
{
"epoch": 0.2977346278317152,
"grad_norm": 0.41670018434524536,
"learning_rate": 1e-06,
"loss": 0.6411,
"num_tokens": 44845205.0,
"step": 46
},
{
"epoch": 0.3042071197411003,
"grad_norm": 0.3809607923030853,
"learning_rate": 1e-06,
"loss": 0.6372,
"num_tokens": 45817785.0,
"step": 47
},
{
"epoch": 0.3106796116504854,
"grad_norm": 0.36747702956199646,
"learning_rate": 1e-06,
"loss": 0.6403,
"num_tokens": 46768552.0,
"step": 48
},
{
"epoch": 0.31715210355987056,
"grad_norm": 0.34375864267349243,
"learning_rate": 1e-06,
"loss": 0.6193,
"num_tokens": 47720390.0,
"step": 49
},
{
"epoch": 0.32362459546925565,
"grad_norm": 0.3278081715106964,
"learning_rate": 1e-06,
"loss": 0.6277,
"num_tokens": 48659119.0,
"step": 50
},
{
"epoch": 0.3300970873786408,
"grad_norm": 0.3173641860485077,
"learning_rate": 1e-06,
"loss": 0.6262,
"num_tokens": 49635896.0,
"step": 51
},
{
"epoch": 0.3365695792880259,
"grad_norm": 0.3114546239376068,
"learning_rate": 1e-06,
"loss": 0.6229,
"num_tokens": 50617481.0,
"step": 52
},
{
"epoch": 0.343042071197411,
"grad_norm": 0.31268301606178284,
"learning_rate": 1e-06,
"loss": 0.6187,
"num_tokens": 51580322.0,
"step": 53
},
{
"epoch": 0.34951456310679613,
"grad_norm": 0.29753509163856506,
"learning_rate": 1e-06,
"loss": 0.6176,
"num_tokens": 52544983.0,
"step": 54
},
{
"epoch": 0.3559870550161812,
"grad_norm": 0.2916501760482788,
"learning_rate": 1e-06,
"loss": 0.6086,
"num_tokens": 53538431.0,
"step": 55
},
{
"epoch": 0.36245954692556637,
"grad_norm": 0.2825452983379364,
"learning_rate": 1e-06,
"loss": 0.6077,
"num_tokens": 54497097.0,
"step": 56
},
{
"epoch": 0.36893203883495146,
"grad_norm": 0.264753133058548,
"learning_rate": 1e-06,
"loss": 0.5966,
"num_tokens": 55515695.0,
"step": 57
},
{
"epoch": 0.37540453074433655,
"grad_norm": 0.25653982162475586,
"learning_rate": 1e-06,
"loss": 0.5997,
"num_tokens": 56471100.0,
"step": 58
},
{
"epoch": 0.3818770226537217,
"grad_norm": 0.2426663488149643,
"learning_rate": 1e-06,
"loss": 0.603,
"num_tokens": 57460948.0,
"step": 59
},
{
"epoch": 0.3883495145631068,
"grad_norm": 0.22974367439746857,
"learning_rate": 1e-06,
"loss": 0.5993,
"num_tokens": 58450663.0,
"step": 60
},
{
"epoch": 0.3948220064724919,
"grad_norm": 0.21884475648403168,
"learning_rate": 1e-06,
"loss": 0.5884,
"num_tokens": 59407638.0,
"step": 61
},
{
"epoch": 0.40129449838187703,
"grad_norm": 0.21299949288368225,
"learning_rate": 1e-06,
"loss": 0.5862,
"num_tokens": 60358388.0,
"step": 62
},
{
"epoch": 0.4077669902912621,
"grad_norm": 0.21099086105823517,
"learning_rate": 1e-06,
"loss": 0.5787,
"num_tokens": 61306137.0,
"step": 63
},
{
"epoch": 0.41423948220064727,
"grad_norm": 0.20446249842643738,
"learning_rate": 1e-06,
"loss": 0.5888,
"num_tokens": 62298616.0,
"step": 64
},
{
"epoch": 0.42071197411003236,
"grad_norm": 0.20021338760852814,
"learning_rate": 1e-06,
"loss": 0.5759,
"num_tokens": 63242620.0,
"step": 65
},
{
"epoch": 0.42718446601941745,
"grad_norm": 0.20259256660938263,
"learning_rate": 1e-06,
"loss": 0.5939,
"num_tokens": 64231555.0,
"step": 66
},
{
"epoch": 0.4336569579288026,
"grad_norm": 0.19663812220096588,
"learning_rate": 1e-06,
"loss": 0.5788,
"num_tokens": 65190302.0,
"step": 67
},
{
"epoch": 0.4401294498381877,
"grad_norm": 0.1927073448896408,
"learning_rate": 1e-06,
"loss": 0.5755,
"num_tokens": 66175194.0,
"step": 68
},
{
"epoch": 0.44660194174757284,
"grad_norm": 0.18935494124889374,
"learning_rate": 1e-06,
"loss": 0.573,
"num_tokens": 67146659.0,
"step": 69
},
{
"epoch": 0.45307443365695793,
"grad_norm": 0.18019847571849823,
"learning_rate": 1e-06,
"loss": 0.5539,
"num_tokens": 68115421.0,
"step": 70
},
{
"epoch": 0.459546925566343,
"grad_norm": 0.17649590969085693,
"learning_rate": 1e-06,
"loss": 0.5506,
"num_tokens": 69078962.0,
"step": 71
},
{
"epoch": 0.46601941747572817,
"grad_norm": 0.16998673975467682,
"learning_rate": 1e-06,
"loss": 0.573,
"num_tokens": 70056552.0,
"step": 72
},
{
"epoch": 0.47249190938511326,
"grad_norm": 0.16949011385440826,
"learning_rate": 1e-06,
"loss": 0.5674,
"num_tokens": 71056405.0,
"step": 73
},
{
"epoch": 0.47896440129449835,
"grad_norm": 0.1706332266330719,
"learning_rate": 1e-06,
"loss": 0.5755,
"num_tokens": 72011963.0,
"step": 74
},
{
"epoch": 0.4854368932038835,
"grad_norm": 0.16622762382030487,
"learning_rate": 1e-06,
"loss": 0.563,
"num_tokens": 72974088.0,
"step": 75
},
{
"epoch": 0.4919093851132686,
"grad_norm": 0.16113753616809845,
"learning_rate": 1e-06,
"loss": 0.5545,
"num_tokens": 73963066.0,
"step": 76
},
{
"epoch": 0.49838187702265374,
"grad_norm": 0.15716572105884552,
"learning_rate": 1e-06,
"loss": 0.5465,
"num_tokens": 74927507.0,
"step": 77
},
{
"epoch": 0.5048543689320388,
"grad_norm": 0.15952879190444946,
"learning_rate": 1e-06,
"loss": 0.5513,
"num_tokens": 75913302.0,
"step": 78
},
{
"epoch": 0.511326860841424,
"grad_norm": 0.1577850878238678,
"learning_rate": 1e-06,
"loss": 0.559,
"num_tokens": 76871637.0,
"step": 79
},
{
"epoch": 0.517799352750809,
"grad_norm": 0.1505461186170578,
"learning_rate": 1e-06,
"loss": 0.5509,
"num_tokens": 77844738.0,
"step": 80
},
{
"epoch": 0.5242718446601942,
"grad_norm": 0.1514582335948944,
"learning_rate": 1e-06,
"loss": 0.5453,
"num_tokens": 78789735.0,
"step": 81
},
{
"epoch": 0.5307443365695793,
"grad_norm": 0.15150229632854462,
"learning_rate": 1e-06,
"loss": 0.5417,
"num_tokens": 79740057.0,
"step": 82
},
{
"epoch": 0.5372168284789643,
"grad_norm": 0.14974889159202576,
"learning_rate": 1e-06,
"loss": 0.5498,
"num_tokens": 80691344.0,
"step": 83
},
{
"epoch": 0.5436893203883495,
"grad_norm": 0.14659319818019867,
"learning_rate": 1e-06,
"loss": 0.5456,
"num_tokens": 81695065.0,
"step": 84
},
{
"epoch": 0.5501618122977346,
"grad_norm": 0.14839723706245422,
"learning_rate": 1e-06,
"loss": 0.5446,
"num_tokens": 82670591.0,
"step": 85
},
{
"epoch": 0.5566343042071198,
"grad_norm": 0.1449030339717865,
"learning_rate": 1e-06,
"loss": 0.5434,
"num_tokens": 83619525.0,
"step": 86
},
{
"epoch": 0.5631067961165048,
"grad_norm": 0.14636728167533875,
"learning_rate": 1e-06,
"loss": 0.5427,
"num_tokens": 84583829.0,
"step": 87
},
{
"epoch": 0.56957928802589,
"grad_norm": 0.14410880208015442,
"learning_rate": 1e-06,
"loss": 0.54,
"num_tokens": 85554557.0,
"step": 88
},
{
"epoch": 0.5760517799352751,
"grad_norm": 0.14123332500457764,
"learning_rate": 1e-06,
"loss": 0.5412,
"num_tokens": 86534800.0,
"step": 89
},
{
"epoch": 0.5825242718446602,
"grad_norm": 0.14631222188472748,
"learning_rate": 1e-06,
"loss": 0.5451,
"num_tokens": 87479322.0,
"step": 90
},
{
"epoch": 0.5889967637540453,
"grad_norm": 0.141328364610672,
"learning_rate": 1e-06,
"loss": 0.543,
"num_tokens": 88440604.0,
"step": 91
},
{
"epoch": 0.5954692556634305,
"grad_norm": 0.14410269260406494,
"learning_rate": 1e-06,
"loss": 0.5389,
"num_tokens": 89440902.0,
"step": 92
},
{
"epoch": 0.6019417475728155,
"grad_norm": 0.14109647274017334,
"learning_rate": 1e-06,
"loss": 0.5312,
"num_tokens": 90420292.0,
"step": 93
},
{
"epoch": 0.6084142394822006,
"grad_norm": 0.14093320071697235,
"learning_rate": 1e-06,
"loss": 0.5432,
"num_tokens": 91365927.0,
"step": 94
},
{
"epoch": 0.6148867313915858,
"grad_norm": 0.1392253190279007,
"learning_rate": 1e-06,
"loss": 0.5342,
"num_tokens": 92339481.0,
"step": 95
},
{
"epoch": 0.6213592233009708,
"grad_norm": 0.14118197560310364,
"learning_rate": 1e-06,
"loss": 0.5291,
"num_tokens": 93277920.0,
"step": 96
},
{
"epoch": 0.627831715210356,
"grad_norm": 0.14340411126613617,
"learning_rate": 1e-06,
"loss": 0.527,
"num_tokens": 94233058.0,
"step": 97
},
{
"epoch": 0.6343042071197411,
"grad_norm": 0.13777285814285278,
"learning_rate": 1e-06,
"loss": 0.5407,
"num_tokens": 95195865.0,
"step": 98
},
{
"epoch": 0.6407766990291263,
"grad_norm": 0.14087024331092834,
"learning_rate": 1e-06,
"loss": 0.5219,
"num_tokens": 96165533.0,
"step": 99
},
{
"epoch": 0.6472491909385113,
"grad_norm": 0.13480006158351898,
"learning_rate": 1e-06,
"loss": 0.5229,
"num_tokens": 97128908.0,
"step": 100
},
{
"epoch": 0.6537216828478964,
"grad_norm": 0.13443224132061005,
"learning_rate": 1e-06,
"loss": 0.5235,
"num_tokens": 98137812.0,
"step": 101
},
{
"epoch": 0.6601941747572816,
"grad_norm": 0.13486695289611816,
"learning_rate": 1e-06,
"loss": 0.521,
"num_tokens": 99133445.0,
"step": 102
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.13491222262382507,
"learning_rate": 1e-06,
"loss": 0.5118,
"num_tokens": 100106034.0,
"step": 103
},
{
"epoch": 0.6731391585760518,
"grad_norm": 0.13977712392807007,
"learning_rate": 1e-06,
"loss": 0.5394,
"num_tokens": 101060830.0,
"step": 104
},
{
"epoch": 0.6796116504854369,
"grad_norm": 0.1345338374376297,
"learning_rate": 1e-06,
"loss": 0.5226,
"num_tokens": 102032708.0,
"step": 105
},
{
"epoch": 0.686084142394822,
"grad_norm": 0.13142578303813934,
"learning_rate": 1e-06,
"loss": 0.5061,
"num_tokens": 103020888.0,
"step": 106
},
{
"epoch": 0.6925566343042071,
"grad_norm": 0.14058642089366913,
"learning_rate": 1e-06,
"loss": 0.5315,
"num_tokens": 103986883.0,
"step": 107
},
{
"epoch": 0.6990291262135923,
"grad_norm": 0.13689804077148438,
"learning_rate": 1e-06,
"loss": 0.5275,
"num_tokens": 104978514.0,
"step": 108
},
{
"epoch": 0.7055016181229773,
"grad_norm": 0.1382923722267151,
"learning_rate": 1e-06,
"loss": 0.5159,
"num_tokens": 105938542.0,
"step": 109
},
{
"epoch": 0.7119741100323624,
"grad_norm": 0.13341419398784637,
"learning_rate": 1e-06,
"loss": 0.5175,
"num_tokens": 106909993.0,
"step": 110
},
{
"epoch": 0.7184466019417476,
"grad_norm": 0.13735052943229675,
"learning_rate": 1e-06,
"loss": 0.5204,
"num_tokens": 107872313.0,
"step": 111
},
{
"epoch": 0.7249190938511327,
"grad_norm": 0.14685890078544617,
"learning_rate": 1e-06,
"loss": 0.5154,
"num_tokens": 108876000.0,
"step": 112
},
{
"epoch": 0.7313915857605178,
"grad_norm": 0.13520538806915283,
"learning_rate": 1e-06,
"loss": 0.5163,
"num_tokens": 109857790.0,
"step": 113
},
{
"epoch": 0.7378640776699029,
"grad_norm": 0.1330995261669159,
"learning_rate": 1e-06,
"loss": 0.5155,
"num_tokens": 110826497.0,
"step": 114
},
{
"epoch": 0.7443365695792881,
"grad_norm": 0.134913370013237,
"learning_rate": 1e-06,
"loss": 0.5142,
"num_tokens": 111775780.0,
"step": 115
},
{
"epoch": 0.7508090614886731,
"grad_norm": 0.13504451513290405,
"learning_rate": 1e-06,
"loss": 0.5161,
"num_tokens": 112755203.0,
"step": 116
},
{
"epoch": 0.7572815533980582,
"grad_norm": 0.13805583119392395,
"learning_rate": 1e-06,
"loss": 0.5166,
"num_tokens": 113720983.0,
"step": 117
},
{
"epoch": 0.7637540453074434,
"grad_norm": 0.137771874666214,
"learning_rate": 1e-06,
"loss": 0.5057,
"num_tokens": 114690500.0,
"step": 118
},
{
"epoch": 0.7702265372168284,
"grad_norm": 0.13603690266609192,
"learning_rate": 1e-06,
"loss": 0.5059,
"num_tokens": 115663752.0,
"step": 119
},
{
"epoch": 0.7766990291262136,
"grad_norm": 0.1295483261346817,
"learning_rate": 1e-06,
"loss": 0.5031,
"num_tokens": 116656118.0,
"step": 120
},
{
"epoch": 0.7831715210355987,
"grad_norm": 0.13289949297904968,
"learning_rate": 1e-06,
"loss": 0.5127,
"num_tokens": 117647006.0,
"step": 121
},
{
"epoch": 0.7896440129449838,
"grad_norm": 0.13150979578495026,
"learning_rate": 1e-06,
"loss": 0.5023,
"num_tokens": 118637898.0,
"step": 122
},
{
"epoch": 0.7961165048543689,
"grad_norm": 0.13312214612960815,
"learning_rate": 1e-06,
"loss": 0.5143,
"num_tokens": 119611539.0,
"step": 123
},
{
"epoch": 0.8025889967637541,
"grad_norm": 0.13679178059101105,
"learning_rate": 1e-06,
"loss": 0.5149,
"num_tokens": 120576485.0,
"step": 124
},
{
"epoch": 0.8090614886731392,
"grad_norm": 0.13002805411815643,
"learning_rate": 1e-06,
"loss": 0.5143,
"num_tokens": 121567862.0,
"step": 125
},
{
"epoch": 0.8155339805825242,
"grad_norm": 0.13436037302017212,
"learning_rate": 1e-06,
"loss": 0.5028,
"num_tokens": 122530893.0,
"step": 126
},
{
"epoch": 0.8220064724919094,
"grad_norm": 0.13467973470687866,
"learning_rate": 1e-06,
"loss": 0.5069,
"num_tokens": 123480227.0,
"step": 127
},
{
"epoch": 0.8284789644012945,
"grad_norm": 0.13601741194725037,
"learning_rate": 1e-06,
"loss": 0.5194,
"num_tokens": 124462070.0,
"step": 128
},
{
"epoch": 0.8349514563106796,
"grad_norm": 0.1381833255290985,
"learning_rate": 1e-06,
"loss": 0.5142,
"num_tokens": 125440428.0,
"step": 129
},
{
"epoch": 0.8414239482200647,
"grad_norm": 0.1346794068813324,
"learning_rate": 1e-06,
"loss": 0.5115,
"num_tokens": 126416310.0,
"step": 130
},
{
"epoch": 0.8478964401294499,
"grad_norm": 0.13682374358177185,
"learning_rate": 1e-06,
"loss": 0.512,
"num_tokens": 127406436.0,
"step": 131
},
{
"epoch": 0.8543689320388349,
"grad_norm": 0.13239574432373047,
"learning_rate": 1e-06,
"loss": 0.5068,
"num_tokens": 128372197.0,
"step": 132
},
{
"epoch": 0.86084142394822,
"grad_norm": 0.13347913324832916,
"learning_rate": 1e-06,
"loss": 0.5062,
"num_tokens": 129371264.0,
"step": 133
},
{
"epoch": 0.8673139158576052,
"grad_norm": 0.1342724710702896,
"learning_rate": 1e-06,
"loss": 0.4917,
"num_tokens": 130306080.0,
"step": 134
},
{
"epoch": 0.8737864077669902,
"grad_norm": 0.13179434835910797,
"learning_rate": 1e-06,
"loss": 0.5107,
"num_tokens": 131261937.0,
"step": 135
},
{
"epoch": 0.8802588996763754,
"grad_norm": 0.13348270952701569,
"learning_rate": 1e-06,
"loss": 0.5044,
"num_tokens": 132223182.0,
"step": 136
},
{
"epoch": 0.8867313915857605,
"grad_norm": 0.22263851761817932,
"learning_rate": 1e-06,
"loss": 0.5082,
"num_tokens": 133211562.0,
"step": 137
},
{
"epoch": 0.8932038834951457,
"grad_norm": 0.13348044455051422,
"learning_rate": 1e-06,
"loss": 0.5043,
"num_tokens": 134208133.0,
"step": 138
},
{
"epoch": 0.8996763754045307,
"grad_norm": 0.13663546741008759,
"learning_rate": 1e-06,
"loss": 0.5083,
"num_tokens": 135194807.0,
"step": 139
},
{
"epoch": 0.9061488673139159,
"grad_norm": 0.1293790638446808,
"learning_rate": 1e-06,
"loss": 0.5046,
"num_tokens": 136175941.0,
"step": 140
},
{
"epoch": 0.912621359223301,
"grad_norm": 0.13469921052455902,
"learning_rate": 1e-06,
"loss": 0.5183,
"num_tokens": 137117638.0,
"step": 141
},
{
"epoch": 0.919093851132686,
"grad_norm": 0.1336260437965393,
"learning_rate": 1e-06,
"loss": 0.4941,
"num_tokens": 138102412.0,
"step": 142
},
{
"epoch": 0.9255663430420712,
"grad_norm": 0.1314634084701538,
"learning_rate": 1e-06,
"loss": 0.497,
"num_tokens": 139056304.0,
"step": 143
},
{
"epoch": 0.9320388349514563,
"grad_norm": 0.13598428666591644,
"learning_rate": 1e-06,
"loss": 0.5026,
"num_tokens": 140002988.0,
"step": 144
},
{
"epoch": 0.9385113268608414,
"grad_norm": 0.13465505838394165,
"learning_rate": 1e-06,
"loss": 0.5051,
"num_tokens": 140939323.0,
"step": 145
},
{
"epoch": 0.9449838187702265,
"grad_norm": 0.13320808112621307,
"learning_rate": 1e-06,
"loss": 0.5007,
"num_tokens": 141898246.0,
"step": 146
},
{
"epoch": 0.9514563106796117,
"grad_norm": 0.13584767282009125,
"learning_rate": 1e-06,
"loss": 0.5074,
"num_tokens": 142861118.0,
"step": 147
},
{
"epoch": 0.9579288025889967,
"grad_norm": 0.13394641876220703,
"learning_rate": 1e-06,
"loss": 0.5002,
"num_tokens": 143845628.0,
"step": 148
},
{
"epoch": 0.9644012944983819,
"grad_norm": 0.13663163781166077,
"learning_rate": 1e-06,
"loss": 0.5129,
"num_tokens": 144816454.0,
"step": 149
},
{
"epoch": 0.970873786407767,
"grad_norm": 0.12985925376415253,
"learning_rate": 1e-06,
"loss": 0.5006,
"num_tokens": 145791528.0,
"step": 150
},
{
"epoch": 0.9773462783171522,
"grad_norm": 0.1344696283340454,
"learning_rate": 1e-06,
"loss": 0.5136,
"num_tokens": 146776225.0,
"step": 151
},
{
"epoch": 0.9838187702265372,
"grad_norm": 0.1393062323331833,
"learning_rate": 1e-06,
"loss": 0.4963,
"num_tokens": 147716038.0,
"step": 152
},
{
"epoch": 0.9902912621359223,
"grad_norm": 0.13568711280822754,
"learning_rate": 1e-06,
"loss": 0.4935,
"num_tokens": 148641036.0,
"step": 153
},
{
"epoch": 0.9967637540453075,
"grad_norm": 0.12916181981563568,
"learning_rate": 1e-06,
"loss": 0.4964,
"num_tokens": 149611850.0,
"step": 154
},
{
"epoch": 0.9967637540453075,
"step": 154,
"total_flos": 6.182777521931551e+18,
"train_loss": 0.6460230693027571,
"train_runtime": 6408.7951,
"train_samples_per_second": 10.787,
"train_steps_per_second": 0.024
}
],
"logging_steps": 1,
"max_steps": 154,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 8,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.182777521931551e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}