{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9751949740034662,
"eval_steps": 500,
"global_step": 576,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0034662045060658577,
"grad_norm": 0.322265625,
"learning_rate": 4.000000000000001e-06,
"loss": 1.5121,
"step": 1
},
{
"epoch": 0.006932409012131715,
"grad_norm": 0.66015625,
"learning_rate": 8.000000000000001e-06,
"loss": 1.5791,
"step": 2
},
{
"epoch": 0.010398613518197574,
"grad_norm": 0.330078125,
"learning_rate": 1.2e-05,
"loss": 1.5152,
"step": 3
},
{
"epoch": 0.01386481802426343,
"grad_norm": 0.32421875,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.5108,
"step": 4
},
{
"epoch": 0.01733102253032929,
"grad_norm": 0.326171875,
"learning_rate": 2e-05,
"loss": 1.516,
"step": 5
},
{
"epoch": 0.02079722703639515,
"grad_norm": 0.32421875,
"learning_rate": 2.4e-05,
"loss": 1.536,
"step": 6
},
{
"epoch": 0.024263431542461005,
"grad_norm": 0.34765625,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.6207,
"step": 7
},
{
"epoch": 0.02772963604852686,
"grad_norm": 0.30078125,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.5638,
"step": 8
},
{
"epoch": 0.03119584055459272,
"grad_norm": 0.234375,
"learning_rate": 3.6e-05,
"loss": 1.508,
"step": 9
},
{
"epoch": 0.03466204506065858,
"grad_norm": 0.2578125,
"learning_rate": 4e-05,
"loss": 1.4821,
"step": 10
},
{
"epoch": 0.038128249566724434,
"grad_norm": 0.287109375,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.4769,
"step": 11
},
{
"epoch": 0.0415944540727903,
"grad_norm": 0.3203125,
"learning_rate": 4.8e-05,
"loss": 1.4965,
"step": 12
},
{
"epoch": 0.045060658578856154,
"grad_norm": 0.2734375,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.4346,
"step": 13
},
{
"epoch": 0.04852686308492201,
"grad_norm": 0.2392578125,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.4573,
"step": 14
},
{
"epoch": 0.05199306759098787,
"grad_norm": 0.263671875,
"learning_rate": 6e-05,
"loss": 1.3925,
"step": 15
},
{
"epoch": 0.05545927209705372,
"grad_norm": 0.2392578125,
"learning_rate": 6.400000000000001e-05,
"loss": 1.45,
"step": 16
},
{
"epoch": 0.058925476603119586,
"grad_norm": 0.20703125,
"learning_rate": 6.800000000000001e-05,
"loss": 1.4752,
"step": 17
},
{
"epoch": 0.06239168110918544,
"grad_norm": 0.1630859375,
"learning_rate": 7.2e-05,
"loss": 1.2915,
"step": 18
},
{
"epoch": 0.0658578856152513,
"grad_norm": 0.1640625,
"learning_rate": 7.6e-05,
"loss": 1.4101,
"step": 19
},
{
"epoch": 0.06932409012131716,
"grad_norm": 0.1328125,
"learning_rate": 8e-05,
"loss": 1.3524,
"step": 20
},
{
"epoch": 0.07279029462738301,
"grad_norm": 0.1279296875,
"learning_rate": 8.4e-05,
"loss": 1.3107,
"step": 21
},
{
"epoch": 0.07625649913344887,
"grad_norm": 0.263671875,
"learning_rate": 8.800000000000001e-05,
"loss": 1.4215,
"step": 22
},
{
"epoch": 0.07972270363951472,
"grad_norm": 0.1435546875,
"learning_rate": 9.200000000000001e-05,
"loss": 1.364,
"step": 23
},
{
"epoch": 0.0831889081455806,
"grad_norm": 0.154296875,
"learning_rate": 9.6e-05,
"loss": 1.3493,
"step": 24
},
{
"epoch": 0.08665511265164645,
"grad_norm": 0.12451171875,
"learning_rate": 0.0001,
"loss": 1.3874,
"step": 25
},
{
"epoch": 0.09012131715771231,
"grad_norm": 0.1220703125,
"learning_rate": 0.00010400000000000001,
"loss": 1.4087,
"step": 26
},
{
"epoch": 0.09358752166377816,
"grad_norm": 0.15234375,
"learning_rate": 0.00010800000000000001,
"loss": 1.3556,
"step": 27
},
{
"epoch": 0.09705372616984402,
"grad_norm": 0.12158203125,
"learning_rate": 0.00011200000000000001,
"loss": 1.3657,
"step": 28
},
{
"epoch": 0.10051993067590988,
"grad_norm": 0.12060546875,
"learning_rate": 0.000116,
"loss": 1.3307,
"step": 29
},
{
"epoch": 0.10398613518197573,
"grad_norm": 0.134765625,
"learning_rate": 0.00012,
"loss": 1.389,
"step": 30
},
{
"epoch": 0.10745233968804159,
"grad_norm": 0.11865234375,
"learning_rate": 0.000124,
"loss": 1.3153,
"step": 31
},
{
"epoch": 0.11091854419410745,
"grad_norm": 0.1162109375,
"learning_rate": 0.00012800000000000002,
"loss": 1.2661,
"step": 32
},
{
"epoch": 0.11438474870017332,
"grad_norm": 0.1240234375,
"learning_rate": 0.000132,
"loss": 1.3624,
"step": 33
},
{
"epoch": 0.11785095320623917,
"grad_norm": 0.1640625,
"learning_rate": 0.00013600000000000003,
"loss": 1.3389,
"step": 34
},
{
"epoch": 0.12131715771230503,
"grad_norm": 0.10888671875,
"learning_rate": 0.00014,
"loss": 1.257,
"step": 35
},
{
"epoch": 0.12478336221837089,
"grad_norm": 0.11083984375,
"learning_rate": 0.000144,
"loss": 1.2892,
"step": 36
},
{
"epoch": 0.12824956672443674,
"grad_norm": 0.2890625,
"learning_rate": 0.000148,
"loss": 1.3073,
"step": 37
},
{
"epoch": 0.1317157712305026,
"grad_norm": 0.11962890625,
"learning_rate": 0.000152,
"loss": 1.2713,
"step": 38
},
{
"epoch": 0.13518197573656845,
"grad_norm": 0.142578125,
"learning_rate": 0.00015600000000000002,
"loss": 1.3651,
"step": 39
},
{
"epoch": 0.1386481802426343,
"grad_norm": 0.123046875,
"learning_rate": 0.00016,
"loss": 1.376,
"step": 40
},
{
"epoch": 0.14211438474870017,
"grad_norm": 0.146484375,
"learning_rate": 0.000164,
"loss": 1.4392,
"step": 41
},
{
"epoch": 0.14558058925476602,
"grad_norm": 0.169921875,
"learning_rate": 0.000168,
"loss": 1.3044,
"step": 42
},
{
"epoch": 0.14904679376083188,
"grad_norm": 0.1484375,
"learning_rate": 0.000172,
"loss": 1.3437,
"step": 43
},
{
"epoch": 0.15251299826689774,
"grad_norm": 0.1640625,
"learning_rate": 0.00017600000000000002,
"loss": 1.3892,
"step": 44
},
{
"epoch": 0.1559792027729636,
"grad_norm": 0.12451171875,
"learning_rate": 0.00018,
"loss": 1.3279,
"step": 45
},
{
"epoch": 0.15944540727902945,
"grad_norm": 0.15625,
"learning_rate": 0.00018400000000000003,
"loss": 1.3208,
"step": 46
},
{
"epoch": 0.16291161178509533,
"grad_norm": 0.1416015625,
"learning_rate": 0.000188,
"loss": 1.3207,
"step": 47
},
{
"epoch": 0.1663778162911612,
"grad_norm": 0.1337890625,
"learning_rate": 0.000192,
"loss": 1.325,
"step": 48
},
{
"epoch": 0.16984402079722705,
"grad_norm": 0.162109375,
"learning_rate": 0.000196,
"loss": 1.3512,
"step": 49
},
{
"epoch": 0.1733102253032929,
"grad_norm": 0.12890625,
"learning_rate": 0.0002,
"loss": 1.2986,
"step": 50
},
{
"epoch": 0.17677642980935876,
"grad_norm": 0.40234375,
"learning_rate": 0.00019999821640202586,
"loss": 1.3212,
"step": 51
},
{
"epoch": 0.18024263431542462,
"grad_norm": 0.1376953125,
"learning_rate": 0.00019999286567172776,
"loss": 1.3269,
"step": 52
},
{
"epoch": 0.18370883882149047,
"grad_norm": 0.13671875,
"learning_rate": 0.00019998394799997682,
"loss": 1.2831,
"step": 53
},
{
"epoch": 0.18717504332755633,
"grad_norm": 0.1591796875,
"learning_rate": 0.00019997146370488383,
"loss": 1.2762,
"step": 54
},
{
"epoch": 0.19064124783362218,
"grad_norm": 0.1279296875,
"learning_rate": 0.00019995541323178807,
"loss": 1.2278,
"step": 55
},
{
"epoch": 0.19410745233968804,
"grad_norm": 0.15234375,
"learning_rate": 0.00019993579715324135,
"loss": 1.2913,
"step": 56
},
{
"epoch": 0.1975736568457539,
"grad_norm": 0.1298828125,
"learning_rate": 0.00019991261616898767,
"loss": 1.2971,
"step": 57
},
{
"epoch": 0.20103986135181975,
"grad_norm": 0.162109375,
"learning_rate": 0.00019988587110593808,
"loss": 1.304,
"step": 58
},
{
"epoch": 0.2045060658578856,
"grad_norm": 0.12890625,
"learning_rate": 0.00019985556291814147,
"loss": 1.2796,
"step": 59
},
{
"epoch": 0.20797227036395147,
"grad_norm": 0.1728515625,
"learning_rate": 0.00019982169268675023,
"loss": 1.3182,
"step": 60
},
{
"epoch": 0.21143847487001732,
"grad_norm": 0.3828125,
"learning_rate": 0.00019978426161998194,
"loss": 1.4574,
"step": 61
},
{
"epoch": 0.21490467937608318,
"grad_norm": 0.13671875,
"learning_rate": 0.00019974327105307604,
"loss": 1.284,
"step": 62
},
{
"epoch": 0.21837088388214904,
"grad_norm": 0.138671875,
"learning_rate": 0.00019969872244824638,
"loss": 1.2724,
"step": 63
},
{
"epoch": 0.2218370883882149,
"grad_norm": 0.546875,
"learning_rate": 0.00019965061739462902,
"loss": 1.28,
"step": 64
},
{
"epoch": 0.22530329289428075,
"grad_norm": 0.1220703125,
"learning_rate": 0.00019959895760822546,
"loss": 1.219,
"step": 65
},
{
"epoch": 0.22876949740034663,
"grad_norm": 0.126953125,
"learning_rate": 0.00019954374493184152,
"loss": 1.2601,
"step": 66
},
{
"epoch": 0.2322357019064125,
"grad_norm": 0.134765625,
"learning_rate": 0.00019948498133502153,
"loss": 1.3263,
"step": 67
},
{
"epoch": 0.23570190641247835,
"grad_norm": 0.1435546875,
"learning_rate": 0.00019942266891397815,
"loss": 1.3496,
"step": 68
},
{
"epoch": 0.2391681109185442,
"grad_norm": 0.125,
"learning_rate": 0.00019935680989151757,
"loss": 1.2856,
"step": 69
},
{
"epoch": 0.24263431542461006,
"grad_norm": 0.138671875,
"learning_rate": 0.0001992874066169601,
"loss": 1.2727,
"step": 70
},
{
"epoch": 0.24610051993067592,
"grad_norm": 0.111328125,
"learning_rate": 0.00019921446156605664,
"loss": 1.214,
"step": 71
},
{
"epoch": 0.24956672443674177,
"grad_norm": 0.150390625,
"learning_rate": 0.00019913797734089997,
"loss": 1.2299,
"step": 72
},
{
"epoch": 0.2530329289428076,
"grad_norm": 0.1357421875,
"learning_rate": 0.00019905795666983234,
"loss": 1.2439,
"step": 73
},
{
"epoch": 0.2564991334488735,
"grad_norm": 0.1435546875,
"learning_rate": 0.00019897440240734788,
"loss": 1.2721,
"step": 74
},
{
"epoch": 0.25996533795493937,
"grad_norm": 0.1337890625,
"learning_rate": 0.00019888731753399088,
"loss": 1.2411,
"step": 75
},
{
"epoch": 0.2634315424610052,
"grad_norm": 0.1494140625,
"learning_rate": 0.00019879670515624936,
"loss": 1.2302,
"step": 76
},
{
"epoch": 0.2668977469670711,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001987025685064444,
"loss": 1.2711,
"step": 77
},
{
"epoch": 0.2703639514731369,
"grad_norm": 0.3203125,
"learning_rate": 0.0001986049109426148,
"loss": 1.2415,
"step": 78
},
{
"epoch": 0.2738301559792028,
"grad_norm": 0.1494140625,
"learning_rate": 0.00019850373594839716,
"loss": 1.3111,
"step": 79
},
{
"epoch": 0.2772963604852686,
"grad_norm": 0.142578125,
"learning_rate": 0.00019839904713290184,
"loss": 1.3138,
"step": 80
},
{
"epoch": 0.2807625649913345,
"grad_norm": 0.1318359375,
"learning_rate": 0.000198290848230584,
"loss": 1.2662,
"step": 81
},
{
"epoch": 0.28422876949740034,
"grad_norm": 0.13671875,
"learning_rate": 0.00019817914310111046,
"loss": 1.2258,
"step": 82
},
{
"epoch": 0.2876949740034662,
"grad_norm": 0.1728515625,
"learning_rate": 0.0001980639357292221,
"loss": 1.2435,
"step": 83
},
{
"epoch": 0.29116117850953205,
"grad_norm": 0.5546875,
"learning_rate": 0.00019794523022459166,
"loss": 1.2568,
"step": 84
},
{
"epoch": 0.29462738301559793,
"grad_norm": 0.1455078125,
"learning_rate": 0.00019782303082167704,
"loss": 1.2181,
"step": 85
},
{
"epoch": 0.29809358752166376,
"grad_norm": 0.283203125,
"learning_rate": 0.00019769734187957038,
"loss": 1.2465,
"step": 86
},
{
"epoch": 0.30155979202772965,
"grad_norm": 0.1552734375,
"learning_rate": 0.00019756816788184259,
"loss": 1.2623,
"step": 87
},
{
"epoch": 0.3050259965337955,
"grad_norm": 0.1474609375,
"learning_rate": 0.00019743551343638324,
"loss": 1.193,
"step": 88
},
{
"epoch": 0.30849220103986136,
"grad_norm": 0.1123046875,
"learning_rate": 0.00019729938327523635,
"loss": 1.2262,
"step": 89
},
{
"epoch": 0.3119584055459272,
"grad_norm": 0.1796875,
"learning_rate": 0.00019715978225443148,
"loss": 1.2862,
"step": 90
},
{
"epoch": 0.31542461005199307,
"grad_norm": 0.146484375,
"learning_rate": 0.00019701671535381064,
"loss": 1.2466,
"step": 91
},
{
"epoch": 0.3188908145580589,
"grad_norm": 0.150390625,
"learning_rate": 0.0001968701876768505,
"loss": 1.2048,
"step": 92
},
{
"epoch": 0.3223570190641248,
"grad_norm": 0.1533203125,
"learning_rate": 0.00019672020445048036,
"loss": 1.3168,
"step": 93
},
{
"epoch": 0.32582322357019067,
"grad_norm": 0.1416015625,
"learning_rate": 0.00019656677102489588,
"loss": 1.2833,
"step": 94
},
{
"epoch": 0.3292894280762565,
"grad_norm": 0.1318359375,
"learning_rate": 0.00019640989287336792,
"loss": 1.3131,
"step": 95
},
{
"epoch": 0.3327556325823224,
"grad_norm": 0.1591796875,
"learning_rate": 0.00019624957559204761,
"loss": 1.2941,
"step": 96
},
{
"epoch": 0.3362218370883882,
"grad_norm": 0.150390625,
"learning_rate": 0.00019608582489976647,
"loss": 1.2618,
"step": 97
},
{
"epoch": 0.3396880415944541,
"grad_norm": 0.130859375,
"learning_rate": 0.0001959186466378326,
"loss": 1.2308,
"step": 98
},
{
"epoch": 0.3431542461005199,
"grad_norm": 0.12890625,
"learning_rate": 0.00019574804676982216,
"loss": 1.1744,
"step": 99
},
{
"epoch": 0.3466204506065858,
"grad_norm": 0.14453125,
"learning_rate": 0.00019557403138136672,
"loss": 1.2391,
"step": 100
},
{
"epoch": 0.35008665511265163,
"grad_norm": 0.1337890625,
"learning_rate": 0.0001953966066799362,
"loss": 1.2976,
"step": 101
},
{
"epoch": 0.3535528596187175,
"grad_norm": 0.1318359375,
"learning_rate": 0.00019521577899461731,
"loss": 1.289,
"step": 102
},
{
"epoch": 0.35701906412478335,
"grad_norm": 0.130859375,
"learning_rate": 0.00019503155477588796,
"loss": 1.219,
"step": 103
},
{
"epoch": 0.36048526863084923,
"grad_norm": 0.181640625,
"learning_rate": 0.000194843940595387,
"loss": 1.2099,
"step": 104
},
{
"epoch": 0.36395147313691506,
"grad_norm": 0.146484375,
"learning_rate": 0.00019465294314567987,
"loss": 1.2524,
"step": 105
},
{
"epoch": 0.36741767764298094,
"grad_norm": 0.12451171875,
"learning_rate": 0.0001944585692400199,
"loss": 1.2382,
"step": 106
},
{
"epoch": 0.3708838821490468,
"grad_norm": 0.1328125,
"learning_rate": 0.0001942608258121051,
"loss": 1.259,
"step": 107
},
{
"epoch": 0.37435008665511266,
"grad_norm": 0.32421875,
"learning_rate": 0.00019405971991583108,
"loss": 1.2613,
"step": 108
},
{
"epoch": 0.3778162911611785,
"grad_norm": 0.349609375,
"learning_rate": 0.0001938552587250392,
"loss": 1.3308,
"step": 109
},
{
"epoch": 0.38128249566724437,
"grad_norm": 0.2138671875,
"learning_rate": 0.00019364744953326074,
"loss": 1.3292,
"step": 110
},
{
"epoch": 0.3847487001733102,
"grad_norm": 0.146484375,
"learning_rate": 0.00019343629975345685,
"loss": 1.2605,
"step": 111
},
{
"epoch": 0.3882149046793761,
"grad_norm": 0.142578125,
"learning_rate": 0.00019322181691775386,
"loss": 1.2033,
"step": 112
},
{
"epoch": 0.39168110918544197,
"grad_norm": 0.138671875,
"learning_rate": 0.00019300400867717482,
"loss": 1.2595,
"step": 113
},
{
"epoch": 0.3951473136915078,
"grad_norm": 0.1962890625,
"learning_rate": 0.00019278288280136647,
"loss": 1.245,
"step": 114
},
{
"epoch": 0.3986135181975737,
"grad_norm": 0.1533203125,
"learning_rate": 0.00019255844717832205,
"loss": 1.2452,
"step": 115
},
{
"epoch": 0.4020797227036395,
"grad_norm": 0.1318359375,
"learning_rate": 0.00019233070981410007,
"loss": 1.2467,
"step": 116
},
{
"epoch": 0.4055459272097054,
"grad_norm": 0.1455078125,
"learning_rate": 0.00019209967883253849,
"loss": 1.2339,
"step": 117
},
{
"epoch": 0.4090121317157712,
"grad_norm": 0.1455078125,
"learning_rate": 0.00019186536247496518,
"loss": 1.2301,
"step": 118
},
{
"epoch": 0.4124783362218371,
"grad_norm": 0.2021484375,
"learning_rate": 0.00019162776909990373,
"loss": 1.2376,
"step": 119
},
{
"epoch": 0.41594454072790293,
"grad_norm": 0.134765625,
"learning_rate": 0.00019138690718277542,
"loss": 1.2114,
"step": 120
},
{
"epoch": 0.4194107452339688,
"grad_norm": 0.1494140625,
"learning_rate": 0.00019114278531559675,
"loss": 1.2057,
"step": 121
},
{
"epoch": 0.42287694974003465,
"grad_norm": 0.1328125,
"learning_rate": 0.0001908954122066731,
"loss": 1.2832,
"step": 122
},
{
"epoch": 0.42634315424610053,
"grad_norm": 0.201171875,
"learning_rate": 0.000190644796680288,
"loss": 1.2481,
"step": 123
},
{
"epoch": 0.42980935875216636,
"grad_norm": 0.3046875,
"learning_rate": 0.00019039094767638832,
"loss": 1.212,
"step": 124
},
{
"epoch": 0.43327556325823224,
"grad_norm": 0.1552734375,
"learning_rate": 0.0001901338742502655,
"loss": 1.2385,
"step": 125
},
{
"epoch": 0.43674176776429807,
"grad_norm": 0.1982421875,
"learning_rate": 0.00018987358557223232,
"loss": 1.2925,
"step": 126
},
{
"epoch": 0.44020797227036396,
"grad_norm": 0.1474609375,
"learning_rate": 0.000189610090927296,
"loss": 1.2285,
"step": 127
},
{
"epoch": 0.4436741767764298,
"grad_norm": 0.12158203125,
"learning_rate": 0.00018934339971482674,
"loss": 1.2074,
"step": 128
},
{
"epoch": 0.44714038128249567,
"grad_norm": 0.126953125,
"learning_rate": 0.00018907352144822284,
"loss": 1.1342,
"step": 129
},
{
"epoch": 0.4506065857885615,
"grad_norm": 0.142578125,
"learning_rate": 0.00018880046575457074,
"loss": 1.2651,
"step": 130
},
{
"epoch": 0.4540727902946274,
"grad_norm": 0.1337890625,
"learning_rate": 0.00018852424237430216,
"loss": 1.1864,
"step": 131
},
{
"epoch": 0.45753899480069327,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001882448611608463,
"loss": 1.1171,
"step": 132
},
{
"epoch": 0.4610051993067591,
"grad_norm": 0.1455078125,
"learning_rate": 0.0001879623320802785,
"loss": 1.2275,
"step": 133
},
{
"epoch": 0.464471403812825,
"grad_norm": 0.384765625,
"learning_rate": 0.00018767666521096466,
"loss": 1.2224,
"step": 134
},
{
"epoch": 0.4679376083188908,
"grad_norm": 0.140625,
"learning_rate": 0.00018738787074320179,
"loss": 1.2373,
"step": 135
},
{
"epoch": 0.4714038128249567,
"grad_norm": 0.1435546875,
"learning_rate": 0.00018709595897885439,
"loss": 1.2542,
"step": 136
},
{
"epoch": 0.4748700173310225,
"grad_norm": 0.146484375,
"learning_rate": 0.00018680094033098716,
"loss": 1.2231,
"step": 137
},
{
"epoch": 0.4783362218370884,
"grad_norm": 0.1298828125,
"learning_rate": 0.00018650282532349332,
"loss": 1.1947,
"step": 138
},
{
"epoch": 0.48180242634315423,
"grad_norm": 0.1630859375,
"learning_rate": 0.00018620162459071936,
"loss": 1.2309,
"step": 139
},
{
"epoch": 0.4852686308492201,
"grad_norm": 0.1552734375,
"learning_rate": 0.00018589734887708556,
"loss": 1.2276,
"step": 140
},
{
"epoch": 0.48873483535528595,
"grad_norm": 0.14453125,
"learning_rate": 0.0001855900090367029,
"loss": 1.2637,
"step": 141
},
{
"epoch": 0.49220103986135183,
"grad_norm": 0.1416015625,
"learning_rate": 0.00018527961603298572,
"loss": 1.3006,
"step": 142
},
{
"epoch": 0.49566724436741766,
"grad_norm": 0.1298828125,
"learning_rate": 0.00018496618093826063,
"loss": 1.2016,
"step": 143
},
{
"epoch": 0.49913344887348354,
"grad_norm": 0.1484375,
"learning_rate": 0.00018464971493337167,
"loss": 1.1819,
"step": 144
},
{
"epoch": 0.5025996533795494,
"grad_norm": 0.1552734375,
"learning_rate": 0.00018433022930728133,
"loss": 1.2551,
"step": 145
},
{
"epoch": 0.5060658578856152,
"grad_norm": 0.1337890625,
"learning_rate": 0.00018400773545666787,
"loss": 1.2634,
"step": 146
},
{
"epoch": 0.5095320623916811,
"grad_norm": 0.1416015625,
"learning_rate": 0.00018368224488551896,
"loss": 1.1993,
"step": 147
},
{
"epoch": 0.512998266897747,
"grad_norm": 0.1533203125,
"learning_rate": 0.00018335376920472097,
"loss": 1.2224,
"step": 148
},
{
"epoch": 0.5164644714038128,
"grad_norm": 0.16796875,
"learning_rate": 0.00018302232013164518,
"loss": 1.2288,
"step": 149
},
{
"epoch": 0.5199306759098787,
"grad_norm": 0.1328125,
"learning_rate": 0.0001826879094897294,
"loss": 1.1786,
"step": 150
},
{
"epoch": 0.5233968804159446,
"grad_norm": 0.138671875,
"learning_rate": 0.00018235054920805652,
"loss": 1.2411,
"step": 151
},
{
"epoch": 0.5268630849220104,
"grad_norm": 0.1337890625,
"learning_rate": 0.00018201025132092889,
"loss": 1.1916,
"step": 152
},
{
"epoch": 0.5303292894280762,
"grad_norm": 0.21875,
"learning_rate": 0.00018166702796743888,
"loss": 1.2354,
"step": 153
},
{
"epoch": 0.5337954939341422,
"grad_norm": 0.13671875,
"learning_rate": 0.00018132089139103613,
"loss": 1.24,
"step": 154
},
{
"epoch": 0.537261698440208,
"grad_norm": 0.1552734375,
"learning_rate": 0.00018097185393909049,
"loss": 1.2284,
"step": 155
},
{
"epoch": 0.5407279029462738,
"grad_norm": 0.294921875,
"learning_rate": 0.00018061992806245184,
"loss": 1.2164,
"step": 156
},
{
"epoch": 0.5441941074523396,
"grad_norm": 0.130859375,
"learning_rate": 0.00018026512631500583,
"loss": 1.204,
"step": 157
},
{
"epoch": 0.5476603119584056,
"grad_norm": 0.1357421875,
"learning_rate": 0.00017990746135322592,
"loss": 1.2307,
"step": 158
},
{
"epoch": 0.5511265164644714,
"grad_norm": 0.1630859375,
"learning_rate": 0.00017954694593572227,
"loss": 1.2445,
"step": 159
},
{
"epoch": 0.5545927209705372,
"grad_norm": 0.1904296875,
"learning_rate": 0.00017918359292278611,
"loss": 1.2107,
"step": 160
},
{
"epoch": 0.5580589254766031,
"grad_norm": 0.1484375,
"learning_rate": 0.0001788174152759315,
"loss": 1.1999,
"step": 161
},
{
"epoch": 0.561525129982669,
"grad_norm": 0.1337890625,
"learning_rate": 0.00017844842605743258,
"loss": 1.2639,
"step": 162
},
{
"epoch": 0.5649913344887348,
"grad_norm": 0.115234375,
"learning_rate": 0.00017807663842985777,
"loss": 1.1628,
"step": 163
},
{
"epoch": 0.5684575389948007,
"grad_norm": 0.1513671875,
"learning_rate": 0.00017770206565560033,
"loss": 1.2099,
"step": 164
},
{
"epoch": 0.5719237435008665,
"grad_norm": 0.1318359375,
"learning_rate": 0.00017732472109640503,
"loss": 1.231,
"step": 165
},
{
"epoch": 0.5753899480069324,
"grad_norm": 0.1318359375,
"learning_rate": 0.00017694461821289172,
"loss": 1.1915,
"step": 166
},
{
"epoch": 0.5788561525129983,
"grad_norm": 0.1416015625,
"learning_rate": 0.00017656177056407505,
"loss": 1.2476,
"step": 167
},
{
"epoch": 0.5823223570190641,
"grad_norm": 0.1435546875,
"learning_rate": 0.00017617619180688085,
"loss": 1.2174,
"step": 168
},
{
"epoch": 0.58578856152513,
"grad_norm": 0.1611328125,
"learning_rate": 0.0001757878956956589,
"loss": 1.1836,
"step": 169
},
{
"epoch": 0.5892547660311959,
"grad_norm": 0.1376953125,
"learning_rate": 0.00017539689608169238,
"loss": 1.1782,
"step": 170
},
{
"epoch": 0.5927209705372617,
"grad_norm": 0.16796875,
"learning_rate": 0.00017500320691270365,
"loss": 1.2416,
"step": 171
},
{
"epoch": 0.5961871750433275,
"grad_norm": 0.1328125,
"learning_rate": 0.0001746068422323568,
"loss": 1.1911,
"step": 172
},
{
"epoch": 0.5996533795493935,
"grad_norm": 0.1357421875,
"learning_rate": 0.00017420781617975665,
"loss": 1.2743,
"step": 173
},
{
"epoch": 0.6031195840554593,
"grad_norm": 0.1240234375,
"learning_rate": 0.00017380614298894442,
"loss": 1.2339,
"step": 174
},
{
"epoch": 0.6065857885615251,
"grad_norm": 0.1396484375,
"learning_rate": 0.0001734018369883898,
"loss": 1.2063,
"step": 175
},
{
"epoch": 0.610051993067591,
"grad_norm": 0.138671875,
"learning_rate": 0.0001729949126004802,
"loss": 1.2124,
"step": 176
},
{
"epoch": 0.6135181975736569,
"grad_norm": 0.134765625,
"learning_rate": 0.00017258538434100577,
"loss": 1.1958,
"step": 177
},
{
"epoch": 0.6169844020797227,
"grad_norm": 0.193359375,
"learning_rate": 0.00017217326681864207,
"loss": 1.1745,
"step": 178
},
{
"epoch": 0.6204506065857885,
"grad_norm": 0.16015625,
"learning_rate": 0.00017175857473442863,
"loss": 1.1491,
"step": 179
},
{
"epoch": 0.6239168110918544,
"grad_norm": 0.1181640625,
"learning_rate": 0.00017134132288124465,
"loss": 1.1795,
"step": 180
},
{
"epoch": 0.6273830155979203,
"grad_norm": 0.12451171875,
"learning_rate": 0.00017092152614328135,
"loss": 1.1323,
"step": 181
},
{
"epoch": 0.6308492201039861,
"grad_norm": 0.138671875,
"learning_rate": 0.00017049919949551102,
"loss": 1.2577,
"step": 182
},
{
"epoch": 0.634315424610052,
"grad_norm": 0.14453125,
"learning_rate": 0.00017007435800315263,
"loss": 1.2299,
"step": 183
},
{
"epoch": 0.6377816291161178,
"grad_norm": 0.1640625,
"learning_rate": 0.00016964701682113475,
"loss": 1.2503,
"step": 184
},
{
"epoch": 0.6412478336221837,
"grad_norm": 0.130859375,
"learning_rate": 0.00016921719119355468,
"loss": 1.1444,
"step": 185
},
{
"epoch": 0.6447140381282496,
"grad_norm": 0.134765625,
"learning_rate": 0.0001687848964531348,
"loss": 1.1779,
"step": 186
},
{
"epoch": 0.6481802426343154,
"grad_norm": 0.18359375,
"learning_rate": 0.00016835014802067558,
"loss": 1.1597,
"step": 187
},
{
"epoch": 0.6516464471403813,
"grad_norm": 0.13671875,
"learning_rate": 0.00016791296140450545,
"loss": 1.2693,
"step": 188
},
{
"epoch": 0.6551126516464472,
"grad_norm": 0.1337890625,
"learning_rate": 0.00016747335219992774,
"loss": 1.2418,
"step": 189
},
{
"epoch": 0.658578856152513,
"grad_norm": 0.130859375,
"learning_rate": 0.00016703133608866414,
"loss": 1.2335,
"step": 190
},
{
"epoch": 0.6620450606585788,
"grad_norm": 0.126953125,
"learning_rate": 0.00016658692883829548,
"loss": 1.1994,
"step": 191
},
{
"epoch": 0.6655112651646448,
"grad_norm": 0.1259765625,
"learning_rate": 0.00016614014630169917,
"loss": 1.1553,
"step": 192
},
{
"epoch": 0.6689774696707106,
"grad_norm": 0.1357421875,
"learning_rate": 0.00016569100441648374,
"loss": 1.187,
"step": 193
},
{
"epoch": 0.6724436741767764,
"grad_norm": 0.1259765625,
"learning_rate": 0.00016523951920442034,
"loss": 1.2037,
"step": 194
},
{
"epoch": 0.6759098786828422,
"grad_norm": 0.1318359375,
"learning_rate": 0.00016478570677087116,
"loss": 1.2211,
"step": 195
},
{
"epoch": 0.6793760831889082,
"grad_norm": 0.193359375,
"learning_rate": 0.00016432958330421497,
"loss": 1.2273,
"step": 196
},
{
"epoch": 0.682842287694974,
"grad_norm": 0.1435546875,
"learning_rate": 0.00016387116507526957,
"loss": 1.1865,
"step": 197
},
{
"epoch": 0.6863084922010398,
"grad_norm": 0.12451171875,
"learning_rate": 0.00016341046843671144,
"loss": 1.192,
"step": 198
},
{
"epoch": 0.6897746967071057,
"grad_norm": 0.150390625,
"learning_rate": 0.0001629475098224924,
"loss": 1.255,
"step": 199
},
{
"epoch": 0.6932409012131716,
"grad_norm": 0.1396484375,
"learning_rate": 0.0001624823057472534,
"loss": 1.2314,
"step": 200
},
{
"epoch": 0.6967071057192374,
"grad_norm": 0.13671875,
"learning_rate": 0.00016201487280573536,
"loss": 1.2079,
"step": 201
},
{
"epoch": 0.7001733102253033,
"grad_norm": 0.16015625,
"learning_rate": 0.00016154522767218725,
"loss": 1.2081,
"step": 202
},
{
"epoch": 0.7036395147313691,
"grad_norm": 0.1494140625,
"learning_rate": 0.0001610733870997712,
"loss": 1.2703,
"step": 203
},
{
"epoch": 0.707105719237435,
"grad_norm": 0.146484375,
"learning_rate": 0.000160599367919965,
"loss": 1.2438,
"step": 204
},
{
"epoch": 0.7105719237435009,
"grad_norm": 0.142578125,
"learning_rate": 0.00016012318704196164,
"loss": 1.2292,
"step": 205
},
{
"epoch": 0.7140381282495667,
"grad_norm": 0.1494140625,
"learning_rate": 0.0001596448614520661,
"loss": 1.2269,
"step": 206
},
{
"epoch": 0.7175043327556326,
"grad_norm": 0.263671875,
"learning_rate": 0.0001591644082130895,
"loss": 1.2389,
"step": 207
},
{
"epoch": 0.7209705372616985,
"grad_norm": 0.1474609375,
"learning_rate": 0.0001586818444637402,
"loss": 1.2191,
"step": 208
},
{
"epoch": 0.7244367417677643,
"grad_norm": 0.1630859375,
"learning_rate": 0.00015819718741801283,
"loss": 1.1904,
"step": 209
},
{
"epoch": 0.7279029462738301,
"grad_norm": 0.140625,
"learning_rate": 0.0001577104543645738,
"loss": 1.2045,
"step": 210
},
{
"epoch": 0.7313691507798961,
"grad_norm": 0.13671875,
"learning_rate": 0.00015722166266614494,
"loss": 1.2283,
"step": 211
},
{
"epoch": 0.7348353552859619,
"grad_norm": 0.265625,
"learning_rate": 0.00015673082975888386,
"loss": 1.2956,
"step": 212
},
{
"epoch": 0.7383015597920277,
"grad_norm": 0.138671875,
"learning_rate": 0.00015623797315176218,
"loss": 1.2189,
"step": 213
},
{
"epoch": 0.7417677642980935,
"grad_norm": 0.2099609375,
"learning_rate": 0.0001557431104259408,
"loss": 1.1593,
"step": 214
},
{
"epoch": 0.7452339688041595,
"grad_norm": 0.1376953125,
"learning_rate": 0.00015524625923414283,
"loss": 1.1329,
"step": 215
},
{
"epoch": 0.7487001733102253,
"grad_norm": 0.130859375,
"learning_rate": 0.0001547474373000238,
"loss": 1.1314,
"step": 216
},
{
"epoch": 0.7521663778162911,
"grad_norm": 0.1572265625,
"learning_rate": 0.00015424666241753966,
"loss": 1.2665,
"step": 217
},
{
"epoch": 0.755632582322357,
"grad_norm": 0.15234375,
"learning_rate": 0.0001537439524503116,
"loss": 1.2165,
"step": 218
},
{
"epoch": 0.7590987868284229,
"grad_norm": 0.1318359375,
"learning_rate": 0.00015323932533098925,
"loss": 1.2062,
"step": 219
},
{
"epoch": 0.7625649913344887,
"grad_norm": 0.1298828125,
"learning_rate": 0.00015273279906061082,
"loss": 1.1524,
"step": 220
},
{
"epoch": 0.7660311958405546,
"grad_norm": 0.140625,
"learning_rate": 0.0001522243917079608,
"loss": 1.1551,
"step": 221
},
{
"epoch": 0.7694974003466204,
"grad_norm": 0.1474609375,
"learning_rate": 0.00015171412140892575,
"loss": 1.2309,
"step": 222
},
{
"epoch": 0.7729636048526863,
"grad_norm": 0.150390625,
"learning_rate": 0.0001512020063658471,
"loss": 1.1908,
"step": 223
},
{
"epoch": 0.7764298093587522,
"grad_norm": 0.12158203125,
"learning_rate": 0.0001506880648468719,
"loss": 1.1917,
"step": 224
},
{
"epoch": 0.779896013864818,
"grad_norm": 0.146484375,
"learning_rate": 0.00015017231518530118,
"loss": 1.1562,
"step": 225
},
{
"epoch": 0.7833622183708839,
"grad_norm": 0.1396484375,
"learning_rate": 0.00014965477577893598,
"loss": 1.1602,
"step": 226
},
{
"epoch": 0.7868284228769498,
"grad_norm": 0.1630859375,
"learning_rate": 0.00014913546508942105,
"loss": 1.2065,
"step": 227
},
{
"epoch": 0.7902946273830156,
"grad_norm": 0.1484375,
"learning_rate": 0.0001486144016415862,
"loss": 1.1824,
"step": 228
},
{
"epoch": 0.7937608318890814,
"grad_norm": 0.1513671875,
"learning_rate": 0.00014809160402278572,
"loss": 1.1687,
"step": 229
},
{
"epoch": 0.7972270363951474,
"grad_norm": 0.158203125,
"learning_rate": 0.0001475670908822351,
"loss": 1.2072,
"step": 230
},
{
"epoch": 0.8006932409012132,
"grad_norm": 0.1396484375,
"learning_rate": 0.0001470408809303457,
"loss": 1.2255,
"step": 231
},
{
"epoch": 0.804159445407279,
"grad_norm": 0.1435546875,
"learning_rate": 0.00014651299293805774,
"loss": 1.243,
"step": 232
},
{
"epoch": 0.8076256499133448,
"grad_norm": 0.1337890625,
"learning_rate": 0.00014598344573617022,
"loss": 1.1895,
"step": 233
},
{
"epoch": 0.8110918544194108,
"grad_norm": 0.12890625,
"learning_rate": 0.0001454522582146695,
"loss": 1.1114,
"step": 234
},
{
"epoch": 0.8145580589254766,
"grad_norm": 0.1728515625,
"learning_rate": 0.0001449194493220553,
"loss": 1.1989,
"step": 235
},
{
"epoch": 0.8180242634315424,
"grad_norm": 0.1435546875,
"learning_rate": 0.0001443850380646649,
"loss": 1.2013,
"step": 236
},
{
"epoch": 0.8214904679376083,
"grad_norm": 0.1669921875,
"learning_rate": 0.00014384904350599496,
"loss": 1.1654,
"step": 237
},
{
"epoch": 0.8249566724436742,
"grad_norm": 0.142578125,
"learning_rate": 0.0001433114847660217,
"loss": 1.1622,
"step": 238
},
{
"epoch": 0.82842287694974,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001427723810205187,
"loss": 1.1871,
"step": 239
},
{
"epoch": 0.8318890814558059,
"grad_norm": 0.173828125,
"learning_rate": 0.00014223175150037296,
"loss": 1.2285,
"step": 240
},
{
"epoch": 0.8353552859618717,
"grad_norm": 0.169921875,
"learning_rate": 0.00014168961549089874,
"loss": 1.1787,
"step": 241
},
{
"epoch": 0.8388214904679376,
"grad_norm": 0.1484375,
"learning_rate": 0.00014114599233114986,
"loss": 1.1703,
"step": 242
},
{
"epoch": 0.8422876949740035,
"grad_norm": 0.13671875,
"learning_rate": 0.00014060090141322968,
"loss": 1.1674,
"step": 243
},
{
"epoch": 0.8457538994800693,
"grad_norm": 0.173828125,
"learning_rate": 0.00014005436218159927,
"loss": 1.2099,
"step": 244
},
{
"epoch": 0.8492201039861352,
"grad_norm": 0.169921875,
"learning_rate": 0.00013950639413238394,
"loss": 1.1779,
"step": 245
},
{
"epoch": 0.8526863084922011,
"grad_norm": 0.13671875,
"learning_rate": 0.00013895701681267784,
"loss": 1.2203,
"step": 246
},
{
"epoch": 0.8561525129982669,
"grad_norm": 0.14453125,
"learning_rate": 0.0001384062498198464,
"loss": 1.1241,
"step": 247
},
{
"epoch": 0.8596187175043327,
"grad_norm": 0.140625,
"learning_rate": 0.00013785411280082746,
"loss": 1.2283,
"step": 248
},
{
"epoch": 0.8630849220103987,
"grad_norm": 0.3984375,
"learning_rate": 0.0001373006254514304,
"loss": 1.2642,
"step": 249
},
{
"epoch": 0.8665511265164645,
"grad_norm": 0.1298828125,
"learning_rate": 0.00013674580751563356,
"loss": 1.1703,
"step": 250
},
{
"epoch": 0.8700173310225303,
"grad_norm": 0.146484375,
"learning_rate": 0.00013618967878487983,
"loss": 1.2154,
"step": 251
},
{
"epoch": 0.8734835355285961,
"grad_norm": 0.275390625,
"learning_rate": 0.00013563225909737076,
"loss": 1.1762,
"step": 252
},
{
"epoch": 0.8769497400346621,
"grad_norm": 0.1904296875,
"learning_rate": 0.00013507356833735888,
"loss": 1.2806,
"step": 253
},
{
"epoch": 0.8804159445407279,
"grad_norm": 0.150390625,
"learning_rate": 0.00013451362643443832,
"loss": 1.2032,
"step": 254
},
{
"epoch": 0.8838821490467937,
"grad_norm": 0.15625,
"learning_rate": 0.00013395245336283396,
"loss": 1.2122,
"step": 255
},
{
"epoch": 0.8873483535528596,
"grad_norm": 0.16015625,
"learning_rate": 0.0001333900691406889,
"loss": 1.1839,
"step": 256
},
{
"epoch": 0.8908145580589255,
"grad_norm": 0.1416015625,
"learning_rate": 0.00013282649382935027,
"loss": 1.184,
"step": 257
},
{
"epoch": 0.8942807625649913,
"grad_norm": 0.130859375,
"learning_rate": 0.0001322617475326538,
"loss": 1.1703,
"step": 258
},
{
"epoch": 0.8977469670710572,
"grad_norm": 0.205078125,
"learning_rate": 0.0001316958503962065,
"loss": 1.2546,
"step": 259
},
{
"epoch": 0.901213171577123,
"grad_norm": 0.2177734375,
"learning_rate": 0.00013112882260666805,
"loss": 1.2206,
"step": 260
},
{
"epoch": 0.9046793760831889,
"grad_norm": 0.125,
"learning_rate": 0.00013056068439103085,
"loss": 1.16,
"step": 261
},
{
"epoch": 0.9081455805892548,
"grad_norm": 0.1591796875,
"learning_rate": 0.00012999145601589823,
"loss": 1.2333,
"step": 262
},
{
"epoch": 0.9116117850953206,
"grad_norm": 0.2109375,
"learning_rate": 0.00012942115778676177,
"loss": 1.1691,
"step": 263
},
{
"epoch": 0.9150779896013865,
"grad_norm": 0.19140625,
"learning_rate": 0.00012884981004727676,
"loss": 1.2484,
"step": 264
},
{
"epoch": 0.9185441941074524,
"grad_norm": 0.126953125,
"learning_rate": 0.00012827743317853665,
"loss": 1.1829,
"step": 265
},
{
"epoch": 0.9220103986135182,
"grad_norm": 0.19921875,
"learning_rate": 0.00012770404759834594,
"loss": 1.1925,
"step": 266
},
{
"epoch": 0.925476603119584,
"grad_norm": 0.1455078125,
"learning_rate": 0.00012712967376049176,
"loss": 1.2528,
"step": 267
},
{
"epoch": 0.92894280762565,
"grad_norm": 0.17578125,
"learning_rate": 0.00012655433215401438,
"loss": 1.2717,
"step": 268
},
{
"epoch": 0.9324090121317158,
"grad_norm": 0.154296875,
"learning_rate": 0.0001259780433024763,
"loss": 1.2076,
"step": 269
},
{
"epoch": 0.9358752166377816,
"grad_norm": 0.46484375,
"learning_rate": 0.00012540082776323007,
"loss": 1.2072,
"step": 270
},
{
"epoch": 0.9393414211438474,
"grad_norm": 0.1357421875,
"learning_rate": 0.00012482270612668508,
"loss": 1.1718,
"step": 271
},
{
"epoch": 0.9428076256499134,
"grad_norm": 0.134765625,
"learning_rate": 0.0001242436990155728,
"loss": 1.1794,
"step": 272
},
{
"epoch": 0.9462738301559792,
"grad_norm": 0.1494140625,
"learning_rate": 0.00012366382708421154,
"loss": 1.2162,
"step": 273
},
{
"epoch": 0.949740034662045,
"grad_norm": 0.1806640625,
"learning_rate": 0.00012308311101776932,
"loss": 1.22,
"step": 274
},
{
"epoch": 0.9532062391681109,
"grad_norm": 0.13671875,
"learning_rate": 0.0001225015715315261,
"loss": 1.1998,
"step": 275
},
{
"epoch": 0.9566724436741768,
"grad_norm": 0.6015625,
"learning_rate": 0.00012191922937013489,
"loss": 1.1541,
"step": 276
},
{
"epoch": 0.9601386481802426,
"grad_norm": 0.1650390625,
"learning_rate": 0.00012133610530688168,
"loss": 1.2099,
"step": 277
},
{
"epoch": 0.9636048526863085,
"grad_norm": 0.1474609375,
"learning_rate": 0.00012075222014294447,
"loss": 1.2157,
"step": 278
},
{
"epoch": 0.9670710571923743,
"grad_norm": 0.1298828125,
"learning_rate": 0.00012016759470665112,
"loss": 1.1958,
"step": 279
},
{
"epoch": 0.9705372616984402,
"grad_norm": 0.1767578125,
"learning_rate": 0.00011958224985273648,
"loss": 1.1857,
"step": 280
},
{
"epoch": 0.9740034662045061,
"grad_norm": 0.1376953125,
"learning_rate": 0.00011899620646159855,
"loss": 1.1818,
"step": 281
},
{
"epoch": 0.9774696707105719,
"grad_norm": 0.130859375,
"learning_rate": 0.00011840948543855335,
"loss": 1.1787,
"step": 282
},
{
"epoch": 0.9809358752166378,
"grad_norm": 0.1279296875,
"learning_rate": 0.00011782210771308948,
"loss": 1.1672,
"step": 283
},
{
"epoch": 0.9844020797227037,
"grad_norm": 0.142578125,
"learning_rate": 0.00011723409423812134,
"loss": 1.1523,
"step": 284
},
{
"epoch": 0.9878682842287695,
"grad_norm": 0.1474609375,
"learning_rate": 0.00011664546598924184,
"loss": 1.1124,
"step": 285
},
{
"epoch": 0.9913344887348353,
"grad_norm": 0.1416015625,
"learning_rate": 0.00011605624396397398,
"loss": 1.2084,
"step": 286
},
{
"epoch": 0.9948006932409013,
"grad_norm": 0.1533203125,
"learning_rate": 0.00011546644918102196,
"loss": 1.1845,
"step": 287
},
{
"epoch": 0.9982668977469671,
"grad_norm": 0.13671875,
"learning_rate": 0.00011487610267952142,
"loss": 1.1879,
"step": 288
},
{
"epoch": 1.001733102253033,
"grad_norm": 0.150390625,
"learning_rate": 0.00011428522551828884,
"loss": 1.1703,
"step": 289
},
{
"epoch": 1.0051993067590987,
"grad_norm": 0.1689453125,
"learning_rate": 0.00011369383877507034,
"loss": 1.2222,
"step": 290
},
{
"epoch": 1.0086655112651646,
"grad_norm": 0.1845703125,
"learning_rate": 0.00011310196354578992,
"loss": 1.1581,
"step": 291
},
{
"epoch": 1.0121317157712304,
"grad_norm": 0.1611328125,
"learning_rate": 0.0001125096209437967,
"loss": 1.1404,
"step": 292
},
{
"epoch": 1.0155979202772965,
"grad_norm": 0.150390625,
"learning_rate": 0.00011191683209911202,
"loss": 1.1745,
"step": 293
},
{
"epoch": 1.0190641247833623,
"grad_norm": 0.142578125,
"learning_rate": 0.00011132361815767554,
"loss": 1.1842,
"step": 294
},
{
"epoch": 1.0011915077989602,
"grad_norm": 0.150390625,
"learning_rate": 0.00011073000028059094,
"loss": 1.1571,
"step": 295
},
{
"epoch": 1.004657712305026,
"grad_norm": 0.150390625,
"learning_rate": 0.00011013599964337107,
"loss": 1.1048,
"step": 296
},
{
"epoch": 1.0081239168110918,
"grad_norm": 0.162109375,
"learning_rate": 0.0001095416374351826,
"loss": 1.1669,
"step": 297
},
{
"epoch": 1.0115901213171576,
"grad_norm": 0.1484375,
"learning_rate": 0.00010894693485809016,
"loss": 1.1425,
"step": 298
},
{
"epoch": 1.0150563258232235,
"grad_norm": 0.1435546875,
"learning_rate": 0.00010835191312629993,
"loss": 1.1316,
"step": 299
},
{
"epoch": 1.0185225303292895,
"grad_norm": 0.169921875,
"learning_rate": 0.00010775659346540303,
"loss": 1.1648,
"step": 300
},
{
"epoch": 1.0219887348353554,
"grad_norm": 0.150390625,
"learning_rate": 0.00010716099711161833,
"loss": 1.1439,
"step": 301
},
{
"epoch": 1.0254549393414212,
"grad_norm": 0.1689453125,
"learning_rate": 0.00010656514531103483,
"loss": 1.0911,
"step": 302
},
{
"epoch": 1.028921143847487,
"grad_norm": 0.181640625,
"learning_rate": 0.00010596905931885374,
"loss": 1.1939,
"step": 303
},
{
"epoch": 1.0323873483535528,
"grad_norm": 0.193359375,
"learning_rate": 0.00010537276039863049,
"loss": 1.1634,
"step": 304
},
{
"epoch": 1.0358535528596187,
"grad_norm": 0.1484375,
"learning_rate": 0.00010477626982151603,
"loss": 1.1423,
"step": 305
},
{
"epoch": 1.0393197573656845,
"grad_norm": 0.150390625,
"learning_rate": 0.00010417960886549798,
"loss": 1.0854,
"step": 306
},
{
"epoch": 1.0427859618717505,
"grad_norm": 0.1611328125,
"learning_rate": 0.00010358279881464182,
"loss": 1.1668,
"step": 307
},
{
"epoch": 1.0462521663778164,
"grad_norm": 0.1474609375,
"learning_rate": 0.00010298586095833151,
"loss": 1.0855,
"step": 308
},
{
"epoch": 1.0497183708838822,
"grad_norm": 0.1552734375,
"learning_rate": 0.00010238881659051008,
"loss": 1.1592,
"step": 309
},
{
"epoch": 1.053184575389948,
"grad_norm": 0.1396484375,
"learning_rate": 0.00010179168700892001,
"loss": 1.0904,
"step": 310
},
{
"epoch": 1.0566507798960139,
"grad_norm": 0.13671875,
"learning_rate": 0.00010119449351434353,
"loss": 1.1081,
"step": 311
},
{
"epoch": 1.0601169844020797,
"grad_norm": 0.158203125,
"learning_rate": 0.00010059725740984284,
"loss": 1.1872,
"step": 312
},
{
"epoch": 1.0635831889081455,
"grad_norm": 0.5234375,
"learning_rate": 0.0001,
"loss": 1.0789,
"step": 313
},
{
"epoch": 1.0670493934142113,
"grad_norm": 0.1455078125,
"learning_rate": 9.940274259015721e-05,
"loss": 1.1881,
"step": 314
},
{
"epoch": 1.0705155979202774,
"grad_norm": 0.1494140625,
"learning_rate": 9.880550648565648e-05,
"loss": 1.2129,
"step": 315
},
{
"epoch": 1.0739818024263432,
"grad_norm": 0.1552734375,
"learning_rate": 9.820831299108003e-05,
"loss": 1.1645,
"step": 316
},
{
"epoch": 1.077448006932409,
"grad_norm": 0.1552734375,
"learning_rate": 9.761118340948999e-05,
"loss": 1.1032,
"step": 317
},
{
"epoch": 1.0809142114384749,
"grad_norm": 0.1484375,
"learning_rate": 9.701413904166852e-05,
"loss": 1.183,
"step": 318
},
{
"epoch": 1.0843804159445407,
"grad_norm": 0.173828125,
"learning_rate": 9.64172011853582e-05,
"loss": 1.1968,
"step": 319
},
{
"epoch": 1.0878466204506065,
"grad_norm": 0.1474609375,
"learning_rate": 9.582039113450208e-05,
"loss": 1.1083,
"step": 320
},
{
"epoch": 1.0913128249566724,
"grad_norm": 0.138671875,
"learning_rate": 9.522373017848401e-05,
"loss": 1.1315,
"step": 321
},
{
"epoch": 1.0947790294627384,
"grad_norm": 0.1455078125,
"learning_rate": 9.462723960136952e-05,
"loss": 1.1159,
"step": 322
},
{
"epoch": 1.0982452339688042,
"grad_norm": 0.1484375,
"learning_rate": 9.403094068114626e-05,
"loss": 1.0632,
"step": 323
},
{
"epoch": 1.10171143847487,
"grad_norm": 0.1513671875,
"learning_rate": 9.34348546889652e-05,
"loss": 1.0992,
"step": 324
},
{
"epoch": 1.105177642980936,
"grad_norm": 0.1572265625,
"learning_rate": 9.28390028883817e-05,
"loss": 1.1151,
"step": 325
},
{
"epoch": 1.1086438474870017,
"grad_norm": 0.1533203125,
"learning_rate": 9.224340653459698e-05,
"loss": 1.1,
"step": 326
},
{
"epoch": 1.1121100519930676,
"grad_norm": 0.158203125,
"learning_rate": 9.16480868737001e-05,
"loss": 1.1898,
"step": 327
},
{
"epoch": 1.1155762564991334,
"grad_norm": 0.158203125,
"learning_rate": 9.10530651419099e-05,
"loss": 1.1475,
"step": 328
},
{
"epoch": 1.1190424610051992,
"grad_norm": 0.1806640625,
"learning_rate": 9.045836256481741e-05,
"loss": 1.0909,
"step": 329
},
{
"epoch": 1.122508665511265,
"grad_norm": 0.1435546875,
"learning_rate": 8.986400035662896e-05,
"loss": 1.1045,
"step": 330
},
{
"epoch": 1.125974870017331,
"grad_norm": 0.169921875,
"learning_rate": 8.926999971940909e-05,
"loss": 1.1219,
"step": 331
},
{
"epoch": 1.129441074523397,
"grad_norm": 0.1513671875,
"learning_rate": 8.867638184232446e-05,
"loss": 1.0883,
"step": 332
},
{
"epoch": 1.1329072790294628,
"grad_norm": 0.16015625,
"learning_rate": 8.808316790088801e-05,
"loss": 1.1688,
"step": 333
},
{
"epoch": 1.1363734835355286,
"grad_norm": 0.13671875,
"learning_rate": 8.749037905620334e-05,
"loss": 1.0564,
"step": 334
},
{
"epoch": 1.1398396880415944,
"grad_norm": 0.1484375,
"learning_rate": 8.689803645421011e-05,
"loss": 1.1058,
"step": 335
},
{
"epoch": 1.1433058925476602,
"grad_norm": 0.1640625,
"learning_rate": 8.630616122492967e-05,
"loss": 1.1395,
"step": 336
},
{
"epoch": 1.1467720970537263,
"grad_norm": 0.166015625,
"learning_rate": 8.57147744817112e-05,
"loss": 1.093,
"step": 337
},
{
"epoch": 1.1502383015597921,
"grad_norm": 0.1689453125,
"learning_rate": 8.512389732047859e-05,
"loss": 1.2032,
"step": 338
},
{
"epoch": 1.153704506065858,
"grad_norm": 0.162109375,
"learning_rate": 8.453355081897805e-05,
"loss": 1.1059,
"step": 339
},
{
"epoch": 1.1571707105719238,
"grad_norm": 0.1435546875,
"learning_rate": 8.394375603602603e-05,
"loss": 1.099,
"step": 340
},
{
"epoch": 1.1606369150779896,
"grad_norm": 0.1474609375,
"learning_rate": 8.335453401075818e-05,
"loss": 1.0928,
"step": 341
},
{
"epoch": 1.1641031195840554,
"grad_norm": 0.13671875,
"learning_rate": 8.276590576187869e-05,
"loss": 1.106,
"step": 342
},
{
"epoch": 1.1675693240901213,
"grad_norm": 0.1474609375,
"learning_rate": 8.217789228691055e-05,
"loss": 1.1044,
"step": 343
},
{
"epoch": 1.171035528596187,
"grad_norm": 0.181640625,
"learning_rate": 8.159051456144668e-05,
"loss": 1.1319,
"step": 344
},
{
"epoch": 1.174501733102253,
"grad_norm": 0.18359375,
"learning_rate": 8.10037935384015e-05,
"loss": 1.1364,
"step": 345
},
{
"epoch": 1.177967937608319,
"grad_norm": 0.1591796875,
"learning_rate": 8.041775014726353e-05,
"loss": 1.1426,
"step": 346
},
{
"epoch": 1.1814341421143848,
"grad_norm": 0.1396484375,
"learning_rate": 7.983240529334891e-05,
"loss": 1.1126,
"step": 347
},
{
"epoch": 1.1849003466204506,
"grad_norm": 0.1572265625,
"learning_rate": 7.924777985705556e-05,
"loss": 1.1447,
"step": 348
},
{
"epoch": 1.1883665511265165,
"grad_norm": 0.1806640625,
"learning_rate": 7.866389469311831e-05,
"loss": 1.0795,
"step": 349
},
{
"epoch": 1.1918327556325823,
"grad_norm": 0.1650390625,
"learning_rate": 7.808077062986514e-05,
"loss": 1.1454,
"step": 350
},
{
"epoch": 1.1952989601386481,
"grad_norm": 0.1689453125,
"learning_rate": 7.749842846847394e-05,
"loss": 1.135,
"step": 351
},
{
"epoch": 1.198765164644714,
"grad_norm": 0.154296875,
"learning_rate": 7.69168889822307e-05,
"loss": 1.1744,
"step": 352
},
{
"epoch": 1.20223136915078,
"grad_norm": 0.154296875,
"learning_rate": 7.63361729157885e-05,
"loss": 1.1319,
"step": 353
},
{
"epoch": 1.2056975736568458,
"grad_norm": 0.1787109375,
"learning_rate": 7.575630098442723e-05,
"loss": 1.1385,
"step": 354
},
{
"epoch": 1.2091637781629117,
"grad_norm": 0.1708984375,
"learning_rate": 7.517729387331495e-05,
"loss": 1.0953,
"step": 355
},
{
"epoch": 1.2126299826689775,
"grad_norm": 0.1455078125,
"learning_rate": 7.459917223676993e-05,
"loss": 1.1171,
"step": 356
},
{
"epoch": 1.2160961871750433,
"grad_norm": 0.154296875,
"learning_rate": 7.40219566975237e-05,
"loss": 1.0872,
"step": 357
},
{
"epoch": 1.2195623916811091,
"grad_norm": 0.1669921875,
"learning_rate": 7.344566784598564e-05,
"loss": 1.0873,
"step": 358
},
{
"epoch": 1.223028596187175,
"grad_norm": 0.1630859375,
"learning_rate": 7.287032623950827e-05,
"loss": 1.1548,
"step": 359
},
{
"epoch": 1.2264948006932408,
"grad_norm": 0.181640625,
"learning_rate": 7.229595240165405e-05,
"loss": 1.1514,
"step": 360
},
{
"epoch": 1.2299610051993068,
"grad_norm": 0.1474609375,
"learning_rate": 7.172256682146334e-05,
"loss": 1.1,
"step": 361
},
{
"epoch": 1.2334272097053727,
"grad_norm": 0.1474609375,
"learning_rate": 7.115018995272325e-05,
"loss": 1.0514,
"step": 362
},
{
"epoch": 1.2368934142114385,
"grad_norm": 0.1494140625,
"learning_rate": 7.057884221323825e-05,
"loss": 1.072,
"step": 363
},
{
"epoch": 1.2403596187175043,
"grad_norm": 0.1455078125,
"learning_rate": 7.000854398410182e-05,
"loss": 1.0855,
"step": 364
},
{
"epoch": 1.2438258232235702,
"grad_norm": 0.17578125,
"learning_rate": 6.943931560896921e-05,
"loss": 1.1645,
"step": 365
},
{
"epoch": 1.247292027729636,
"grad_norm": 0.15234375,
"learning_rate": 6.887117739333196e-05,
"loss": 1.0936,
"step": 366
},
{
"epoch": 1.250758232235702,
"grad_norm": 0.220703125,
"learning_rate": 6.830414960379352e-05,
"loss": 1.1273,
"step": 367
},
{
"epoch": 1.2542244367417679,
"grad_norm": 0.1630859375,
"learning_rate": 6.773825246734622e-05,
"loss": 1.1403,
"step": 368
},
{
"epoch": 1.2576906412478337,
"grad_norm": 0.1611328125,
"learning_rate": 6.717350617064972e-05,
"loss": 1.0864,
"step": 369
},
{
"epoch": 1.2611568457538995,
"grad_norm": 0.1376953125,
"learning_rate": 6.660993085931113e-05,
"loss": 1.0672,
"step": 370
},
{
"epoch": 1.2646230502599654,
"grad_norm": 0.1416015625,
"learning_rate": 6.604754663716603e-05,
"loss": 1.1255,
"step": 371
},
{
"epoch": 1.2680892547660312,
"grad_norm": 0.1748046875,
"learning_rate": 6.54863735655617e-05,
"loss": 1.166,
"step": 372
},
{
"epoch": 1.271555459272097,
"grad_norm": 0.1572265625,
"learning_rate": 6.492643166264116e-05,
"loss": 1.1351,
"step": 373
},
{
"epoch": 1.2750216637781628,
"grad_norm": 0.138671875,
"learning_rate": 6.436774090262925e-05,
"loss": 1.0707,
"step": 374
},
{
"epoch": 1.2784878682842287,
"grad_norm": 0.1591796875,
"learning_rate": 6.381032121512018e-05,
"loss": 1.1075,
"step": 375
},
{
"epoch": 1.2819540727902945,
"grad_norm": 0.15234375,
"learning_rate": 6.325419248436649e-05,
"loss": 1.1538,
"step": 376
},
{
"epoch": 1.2854202772963605,
"grad_norm": 0.16015625,
"learning_rate": 6.26993745485696e-05,
"loss": 1.1557,
"step": 377
},
{
"epoch": 1.2888864818024264,
"grad_norm": 0.15234375,
"learning_rate": 6.214588719917256e-05,
"loss": 1.1283,
"step": 378
},
{
"epoch": 1.2923526863084922,
"grad_norm": 0.1669921875,
"learning_rate": 6.159375018015364e-05,
"loss": 1.0724,
"step": 379
},
{
"epoch": 1.295818890814558,
"grad_norm": 0.1455078125,
"learning_rate": 6.104298318732218e-05,
"loss": 1.0947,
"step": 380
},
{
"epoch": 1.2992850953206239,
"grad_norm": 0.1435546875,
"learning_rate": 6.049360586761608e-05,
"loss": 1.1005,
"step": 381
},
{
"epoch": 1.30275129982669,
"grad_norm": 0.1650390625,
"learning_rate": 5.994563781840079e-05,
"loss": 1.0795,
"step": 382
},
{
"epoch": 1.3062175043327557,
"grad_norm": 0.1767578125,
"learning_rate": 5.939909858677035e-05,
"loss": 1.1423,
"step": 383
},
{
"epoch": 1.3096837088388216,
"grad_norm": 0.1708984375,
"learning_rate": 5.885400766885015e-05,
"loss": 1.1295,
"step": 384
},
{
"epoch": 1.3131499133448874,
"grad_norm": 0.1494140625,
"learning_rate": 5.83103845091013e-05,
"loss": 1.0783,
"step": 385
},
{
"epoch": 1.3166161178509532,
"grad_norm": 0.1728515625,
"learning_rate": 5.776824849962705e-05,
"loss": 1.0862,
"step": 386
},
{
"epoch": 1.320082322357019,
"grad_norm": 0.1591796875,
"learning_rate": 5.7227618979481324e-05,
"loss": 1.1009,
"step": 387
},
{
"epoch": 1.3235485268630849,
"grad_norm": 0.1455078125,
"learning_rate": 5.668851523397829e-05,
"loss": 1.1581,
"step": 388
},
{
"epoch": 1.3270147313691507,
"grad_norm": 0.150390625,
"learning_rate": 5.6150956494005035e-05,
"loss": 1.1683,
"step": 389
},
{
"epoch": 1.3304809358752165,
"grad_norm": 0.1728515625,
"learning_rate": 5.561496193533515e-05,
"loss": 1.139,
"step": 390
},
{
"epoch": 1.3339471403812824,
"grad_norm": 0.1513671875,
"learning_rate": 5.5080550677944685e-05,
"loss": 1.1546,
"step": 391
},
{
"epoch": 1.3374133448873484,
"grad_norm": 0.2392578125,
"learning_rate": 5.4547741785330505e-05,
"loss": 1.1741,
"step": 392
},
{
"epoch": 1.3408795493934142,
"grad_norm": 0.1650390625,
"learning_rate": 5.401655426382983e-05,
"loss": 1.1556,
"step": 393
},
{
"epoch": 1.34434575389948,
"grad_norm": 0.1826171875,
"learning_rate": 5.3487007061942276e-05,
"loss": 1.1247,
"step": 394
},
{
"epoch": 1.347811958405546,
"grad_norm": 0.193359375,
"learning_rate": 5.2959119069654336e-05,
"loss": 1.1126,
"step": 395
},
{
"epoch": 1.3512781629116117,
"grad_norm": 0.15234375,
"learning_rate": 5.243290911776497e-05,
"loss": 1.0862,
"step": 396
},
{
"epoch": 1.3547443674176776,
"grad_norm": 0.1435546875,
"learning_rate": 5.1908395977214267e-05,
"loss": 1.1614,
"step": 397
},
{
"epoch": 1.3582105719237436,
"grad_norm": 0.1552734375,
"learning_rate": 5.138559835841381e-05,
"loss": 1.1012,
"step": 398
},
{
"epoch": 1.3616767764298094,
"grad_norm": 0.1513671875,
"learning_rate": 5.0864534910579006e-05,
"loss": 1.1136,
"step": 399
},
{
"epoch": 1.3651429809358753,
"grad_norm": 0.166015625,
"learning_rate": 5.0345224221064025e-05,
"loss": 1.1379,
"step": 400
},
{
"epoch": 1.368609185441941,
"grad_norm": 0.1396484375,
"learning_rate": 4.982768481469886e-05,
"loss": 1.1354,
"step": 401
},
{
"epoch": 1.372075389948007,
"grad_norm": 0.1591796875,
"learning_rate": 4.931193515312813e-05,
"loss": 1.1271,
"step": 402
},
{
"epoch": 1.3755415944540728,
"grad_norm": 0.1455078125,
"learning_rate": 4.879799363415292e-05,
"loss": 1.1033,
"step": 403
},
{
"epoch": 1.3790077989601386,
"grad_norm": 0.158203125,
"learning_rate": 4.828587859107425e-05,
"loss": 1.1389,
"step": 404
},
{
"epoch": 1.3824740034662044,
"grad_norm": 0.314453125,
"learning_rate": 4.777560829203918e-05,
"loss": 1.212,
"step": 405
},
{
"epoch": 1.3859402079722702,
"grad_norm": 0.150390625,
"learning_rate": 4.726720093938921e-05,
"loss": 1.0696,
"step": 406
},
{
"epoch": 1.3894064124783363,
"grad_norm": 0.1650390625,
"learning_rate": 4.676067466901075e-05,
"loss": 1.15,
"step": 407
},
{
"epoch": 1.3928726169844021,
"grad_norm": 0.16796875,
"learning_rate": 4.625604754968839e-05,
"loss": 1.1417,
"step": 408
},
{
"epoch": 1.396338821490468,
"grad_norm": 0.1533203125,
"learning_rate": 4.5753337582460386e-05,
"loss": 1.1387,
"step": 409
},
{
"epoch": 1.3998050259965338,
"grad_norm": 0.15625,
"learning_rate": 4.525256269997621e-05,
"loss": 1.1206,
"step": 410
},
{
"epoch": 1.4032712305025996,
"grad_norm": 0.1611328125,
"learning_rate": 4.4753740765857214e-05,
"loss": 1.0487,
"step": 411
},
{
"epoch": 1.4067374350086654,
"grad_norm": 0.177734375,
"learning_rate": 4.4256889574059236e-05,
"loss": 1.1076,
"step": 412
},
{
"epoch": 1.4102036395147315,
"grad_norm": 0.205078125,
"learning_rate": 4.3762026848237845e-05,
"loss": 1.1193,
"step": 413
},
{
"epoch": 1.4136698440207973,
"grad_norm": 0.1552734375,
"learning_rate": 4.3269170241116155e-05,
"loss": 1.143,
"step": 414
},
{
"epoch": 1.4171360485268631,
"grad_norm": 0.1630859375,
"learning_rate": 4.277833733385508e-05,
"loss": 1.084,
"step": 415
},
{
"epoch": 1.420602253032929,
"grad_norm": 0.146484375,
"learning_rate": 4.2289545635426206e-05,
"loss": 1.0625,
"step": 416
},
{
"epoch": 1.4240684575389948,
"grad_norm": 0.15625,
"learning_rate": 4.1802812581987195e-05,
"loss": 1.1086,
"step": 417
},
{
"epoch": 1.4275346620450606,
"grad_norm": 0.14453125,
"learning_rate": 4.1318155536259795e-05,
"loss": 1.0716,
"step": 418
},
{
"epoch": 1.4310008665511265,
"grad_norm": 0.193359375,
"learning_rate": 4.083559178691053e-05,
"loss": 1.1149,
"step": 419
},
{
"epoch": 1.4344670710571923,
"grad_norm": 0.173828125,
"learning_rate": 4.035513854793389e-05,
"loss": 1.0656,
"step": 420
},
{
"epoch": 1.4379332755632581,
"grad_norm": 0.1650390625,
"learning_rate": 3.987681295803837e-05,
"loss": 1.1365,
"step": 421
},
{
"epoch": 1.4413994800693242,
"grad_norm": 0.15234375,
"learning_rate": 3.940063208003503e-05,
"loss": 1.1028,
"step": 422
},
{
"epoch": 1.44486568457539,
"grad_norm": 0.15625,
"learning_rate": 3.892661290022883e-05,
"loss": 1.1241,
"step": 423
},
{
"epoch": 1.4483318890814558,
"grad_norm": 0.1572265625,
"learning_rate": 3.845477232781278e-05,
"loss": 1.0888,
"step": 424
},
{
"epoch": 1.4517980935875217,
"grad_norm": 0.193359375,
"learning_rate": 3.7985127194264645e-05,
"loss": 1.1493,
"step": 425
},
{
"epoch": 1.4552642980935875,
"grad_norm": 0.154296875,
"learning_rate": 3.751769425274661e-05,
"loss": 1.1237,
"step": 426
},
{
"epoch": 1.4587305025996533,
"grad_norm": 0.1533203125,
"learning_rate": 3.70524901775076e-05,
"loss": 1.0921,
"step": 427
},
{
"epoch": 1.4621967071057194,
"grad_norm": 0.1494140625,
"learning_rate": 3.658953156328857e-05,
"loss": 1.0963,
"step": 428
},
{
"epoch": 1.4656629116117852,
"grad_norm": 0.150390625,
"learning_rate": 3.612883492473045e-05,
"loss": 1.05,
"step": 429
},
{
"epoch": 1.469129116117851,
"grad_norm": 0.16015625,
"learning_rate": 3.567041669578507e-05,
"loss": 1.0716,
"step": 430
},
{
"epoch": 1.4725953206239168,
"grad_norm": 0.1787109375,
"learning_rate": 3.5214293229128836e-05,
"loss": 1.1417,
"step": 431
},
{
"epoch": 1.4760615251299827,
"grad_norm": 0.1748046875,
"learning_rate": 3.476048079557967e-05,
"loss": 1.1555,
"step": 432
},
{
"epoch": 1.4795277296360485,
"grad_norm": 0.1640625,
"learning_rate": 3.4308995583516314e-05,
"loss": 1.1369,
"step": 433
},
{
"epoch": 1.4829939341421143,
"grad_norm": 0.140625,
"learning_rate": 3.3859853698300855e-05,
"loss": 1.0626,
"step": 434
},
{
"epoch": 1.4864601386481802,
"grad_norm": 0.1474609375,
"learning_rate": 3.3413071161704533e-05,
"loss": 1.1042,
"step": 435
},
{
"epoch": 1.489926343154246,
"grad_norm": 0.1591796875,
"learning_rate": 3.2968663911335894e-05,
"loss": 1.1128,
"step": 436
},
{
"epoch": 1.4933925476603118,
"grad_norm": 0.142578125,
"learning_rate": 3.252664780007225e-05,
"loss": 1.0455,
"step": 437
},
{
"epoch": 1.4968587521663779,
"grad_norm": 0.20703125,
"learning_rate": 3.208703859549457e-05,
"loss": 1.1243,
"step": 438
},
{
"epoch": 1.5003249566724437,
"grad_norm": 0.1865234375,
"learning_rate": 3.164985197932443e-05,
"loss": 1.1144,
"step": 439
},
{
"epoch": 1.5037911611785095,
"grad_norm": 0.166015625,
"learning_rate": 3.12151035468652e-05,
"loss": 1.1825,
"step": 440
},
{
"epoch": 1.5072573656845754,
"grad_norm": 0.1513671875,
"learning_rate": 3.078280880644535e-05,
"loss": 1.1083,
"step": 441
},
{
"epoch": 1.5107235701906414,
"grad_norm": 0.15625,
"learning_rate": 3.0352983178865256e-05,
"loss": 1.1029,
"step": 442
},
{
"epoch": 1.5141897746967072,
"grad_norm": 0.13671875,
"learning_rate": 2.9925641996847366e-05,
"loss": 1.0761,
"step": 443
},
{
"epoch": 1.517655979202773,
"grad_norm": 0.169921875,
"learning_rate": 2.9500800504489022e-05,
"loss": 1.1375,
"step": 444
},
{
"epoch": 1.521122183708839,
"grad_norm": 0.1416015625,
"learning_rate": 2.9078473856718636e-05,
"loss": 1.0885,
"step": 445
},
{
"epoch": 1.5245883882149047,
"grad_norm": 0.146484375,
"learning_rate": 2.8658677118755382e-05,
"loss": 1.0909,
"step": 446
},
{
"epoch": 1.5280545927209706,
"grad_norm": 0.15234375,
"learning_rate": 2.824142526557142e-05,
"loss": 1.1085,
"step": 447
},
{
"epoch": 1.5315207972270364,
"grad_norm": 0.1630859375,
"learning_rate": 2.7826733181357932e-05,
"loss": 1.1015,
"step": 448
},
{
"epoch": 1.5349870017331022,
"grad_norm": 0.185546875,
"learning_rate": 2.741461565899426e-05,
"loss": 1.0807,
"step": 449
},
{
"epoch": 1.538453206239168,
"grad_norm": 0.16796875,
"learning_rate": 2.7005087399519835e-05,
"loss": 1.1369,
"step": 450
},
{
"epoch": 1.5419194107452339,
"grad_norm": 0.1533203125,
"learning_rate": 2.6598163011610177e-05,
"loss": 1.0913,
"step": 451
},
{
"epoch": 1.5453856152512997,
"grad_norm": 0.17578125,
"learning_rate": 2.6193857011055622e-05,
"loss": 1.069,
"step": 452
},
{
"epoch": 1.5488518197573655,
"grad_norm": 0.283203125,
"learning_rate": 2.5792183820243332e-05,
"loss": 1.1139,
"step": 453
},
{
"epoch": 1.5523180242634316,
"grad_norm": 0.1552734375,
"learning_rate": 2.5393157767643228e-05,
"loss": 1.1423,
"step": 454
},
{
"epoch": 1.5557842287694974,
"grad_norm": 0.1806640625,
"learning_rate": 2.499679308729639e-05,
"loss": 1.0598,
"step": 455
},
{
"epoch": 1.5592504332755632,
"grad_norm": 0.197265625,
"learning_rate": 2.4603103918307625e-05,
"loss": 1.0956,
"step": 456
},
{
"epoch": 1.5627166377816293,
"grad_norm": 0.1552734375,
"learning_rate": 2.4212104304341122e-05,
"loss": 1.0581,
"step": 457
},
{
"epoch": 1.566182842287695,
"grad_norm": 0.1484375,
"learning_rate": 2.3823808193119178e-05,
"loss": 1.1031,
"step": 458
},
{
"epoch": 1.569649046793761,
"grad_norm": 0.15625,
"learning_rate": 2.3438229435924952e-05,
"loss": 1.1372,
"step": 459
},
{
"epoch": 1.5731152512998268,
"grad_norm": 0.1572265625,
"learning_rate": 2.305538178710831e-05,
"loss": 1.0553,
"step": 460
},
{
"epoch": 1.5765814558058926,
"grad_norm": 0.146484375,
"learning_rate": 2.2675278903594988e-05,
"loss": 1.0975,
"step": 461
},
{
"epoch": 1.5800476603119584,
"grad_norm": 0.1630859375,
"learning_rate": 2.2297934344399695e-05,
"loss": 1.093,
"step": 462
},
{
"epoch": 1.5835138648180243,
"grad_norm": 0.15234375,
"learning_rate": 2.192336157014223e-05,
"loss": 1.1643,
"step": 463
},
{
"epoch": 1.58698006932409,
"grad_norm": 0.1708984375,
"learning_rate": 2.155157394256745e-05,
"loss": 1.022,
"step": 464
},
{
"epoch": 1.590446273830156,
"grad_norm": 0.162109375,
"learning_rate": 2.118258472406851e-05,
"loss": 1.1496,
"step": 465
},
{
"epoch": 1.5939124783362217,
"grad_norm": 0.18359375,
"learning_rate": 2.0816407077213896e-05,
"loss": 1.0578,
"step": 466
},
{
"epoch": 1.5973786828422876,
"grad_norm": 0.1611328125,
"learning_rate": 2.0453054064277756e-05,
"loss": 1.058,
"step": 467
},
{
"epoch": 1.6008448873483534,
"grad_norm": 0.154296875,
"learning_rate": 2.0092538646774072e-05,
"loss": 1.0986,
"step": 468
},
{
"epoch": 1.6043110918544194,
"grad_norm": 0.1455078125,
"learning_rate": 1.9734873684994205e-05,
"loss": 1.0798,
"step": 469
},
{
"epoch": 1.6077772963604853,
"grad_norm": 0.158203125,
"learning_rate": 1.938007193754816e-05,
"loss": 1.1225,
"step": 470
},
{
"epoch": 1.611243500866551,
"grad_norm": 0.1708984375,
"learning_rate": 1.9028146060909523e-05,
"loss": 1.1442,
"step": 471
},
{
"epoch": 1.614709705372617,
"grad_norm": 0.1591796875,
"learning_rate": 1.8679108608963903e-05,
"loss": 1.0768,
"step": 472
},
{
"epoch": 1.618175909878683,
"grad_norm": 0.1435546875,
"learning_rate": 1.8332972032561124e-05,
"loss": 1.0882,
"step": 473
},
{
"epoch": 1.6216421143847488,
"grad_norm": 0.1572265625,
"learning_rate": 1.7989748679071138e-05,
"loss": 1.1352,
"step": 474
},
{
"epoch": 1.6251083188908146,
"grad_norm": 0.1455078125,
"learning_rate": 1.764945079194349e-05,
"loss": 1.1494,
"step": 475
},
{
"epoch": 1.6285745233968805,
"grad_norm": 0.19921875,
"learning_rate": 1.7312090510270627e-05,
"loss": 1.1503,
"step": 476
},
{
"epoch": 1.6320407279029463,
"grad_norm": 0.216796875,
"learning_rate": 1.6977679868354844e-05,
"loss": 1.1132,
"step": 477
},
{
"epoch": 1.6355069324090121,
"grad_norm": 0.1396484375,
"learning_rate": 1.6646230795279026e-05,
"loss": 1.0362,
"step": 478
},
{
"epoch": 1.638973136915078,
"grad_norm": 0.2109375,
"learning_rate": 1.6317755114481058e-05,
"loss": 1.1432,
"step": 479
},
{
"epoch": 1.6424393414211438,
"grad_norm": 0.1376953125,
"learning_rate": 1.5992264543332124e-05,
"loss": 1.069,
"step": 480
},
{
"epoch": 1.6459055459272096,
"grad_norm": 0.140625,
"learning_rate": 1.566977069271872e-05,
"loss": 1.1402,
"step": 481
},
{
"epoch": 1.6493717504332754,
"grad_norm": 0.158203125,
"learning_rate": 1.5350285066628343e-05,
"loss": 1.1141,
"step": 482
},
{
"epoch": 1.6528379549393413,
"grad_norm": 0.16796875,
"learning_rate": 1.5033819061739373e-05,
"loss": 1.1801,
"step": 483
},
{
"epoch": 1.6563041594454073,
"grad_norm": 0.142578125,
"learning_rate": 1.4720383967014306e-05,
"loss": 1.1163,
"step": 484
},
{
"epoch": 1.6597703639514731,
"grad_norm": 0.1435546875,
"learning_rate": 1.4409990963297093e-05,
"loss": 1.1005,
"step": 485
},
{
"epoch": 1.663236568457539,
"grad_norm": 0.1484375,
"learning_rate": 1.4102651122914434e-05,
"loss": 1.0599,
"step": 486
},
{
"epoch": 1.6667027729636048,
"grad_norm": 0.13671875,
"learning_rate": 1.379837540928065e-05,
"loss": 1.0632,
"step": 487
},
{
"epoch": 1.6701689774696709,
"grad_norm": 0.169921875,
"learning_rate": 1.3497174676506674e-05,
"loss": 1.1545,
"step": 488
},
{
"epoch": 1.6736351819757367,
"grad_norm": 0.1435546875,
"learning_rate": 1.319905966901286e-05,
"loss": 1.0175,
"step": 489
},
{
"epoch": 1.6771013864818025,
"grad_norm": 0.1728515625,
"learning_rate": 1.2904041021145596e-05,
"loss": 1.0411,
"step": 490
},
{
"epoch": 1.6805675909878683,
"grad_norm": 0.1611328125,
"learning_rate": 1.2612129256798221e-05,
"loss": 1.1375,
"step": 491
},
{
"epoch": 1.6840337954939342,
"grad_norm": 0.20703125,
"learning_rate": 1.2323334789035367e-05,
"loss": 1.1401,
"step": 492
},
{
"epoch": 1.6875,
"grad_norm": 0.16796875,
"learning_rate": 1.2037667919721506e-05,
"loss": 1.0897,
"step": 493
},
{
"epoch": 1.6909662045060658,
"grad_norm": 0.1669921875,
"learning_rate": 1.17551388391537e-05,
"loss": 1.1001,
"step": 494
},
{
"epoch": 1.6944324090121317,
"grad_norm": 0.173828125,
"learning_rate": 1.1475757625697858e-05,
"loss": 1.0554,
"step": 495
},
{
"epoch": 1.6978986135181975,
"grad_norm": 0.16015625,
"learning_rate": 1.1199534245429255e-05,
"loss": 1.0708,
"step": 496
},
{
"epoch": 1.7013648180242633,
"grad_norm": 0.1591796875,
"learning_rate": 1.0926478551777197e-05,
"loss": 1.1243,
"step": 497
},
{
"epoch": 1.7048310225303291,
"grad_norm": 0.140625,
"learning_rate": 1.0656600285173258e-05,
"loss": 1.1422,
"step": 498
},
{
"epoch": 1.7082972270363952,
"grad_norm": 0.1484375,
"learning_rate": 1.0389909072704041e-05,
"loss": 1.1089,
"step": 499
},
{
"epoch": 1.711763431542461,
"grad_norm": 0.1806640625,
"learning_rate": 1.0126414427767717e-05,
"loss": 1.1462,
"step": 500
},
{
"epoch": 1.7152296360485269,
"grad_norm": 0.6171875,
"learning_rate": 9.866125749734534e-06,
"loss": 1.0778,
"step": 501
},
{
"epoch": 1.7186958405545927,
"grad_norm": 0.1728515625,
"learning_rate": 9.609052323611666e-06,
"loss": 1.0788,
"step": 502
},
{
"epoch": 1.7221620450606587,
"grad_norm": 0.173828125,
"learning_rate": 9.355203319712025e-06,
"loss": 1.0783,
"step": 503
},
{
"epoch": 1.7256282495667246,
"grad_norm": 0.1611328125,
"learning_rate": 9.104587793326901e-06,
"loss": 1.0905,
"step": 504
},
{
"epoch": 1.7290944540727904,
"grad_norm": 0.1513671875,
"learning_rate": 8.85721468440327e-06,
"loss": 1.095,
"step": 505
},
{
"epoch": 1.7325606585788562,
"grad_norm": 0.169921875,
"learning_rate": 8.613092817224611e-06,
"loss": 1.0875,
"step": 506
},
{
"epoch": 1.736026863084922,
"grad_norm": 0.16796875,
"learning_rate": 8.372230900096256e-06,
"loss": 1.0839,
"step": 507
},
{
"epoch": 1.7394930675909879,
"grad_norm": 0.1494140625,
"learning_rate": 8.134637525034839e-06,
"loss": 1.1135,
"step": 508
},
{
"epoch": 1.7429592720970537,
"grad_norm": 0.1494140625,
"learning_rate": 7.900321167461523e-06,
"loss": 1.1053,
"step": 509
},
{
"epoch": 1.7464254766031195,
"grad_norm": 0.1533203125,
"learning_rate": 7.669290185899946e-06,
"loss": 1.1486,
"step": 510
},
{
"epoch": 1.7498916811091854,
"grad_norm": 0.162109375,
"learning_rate": 7.441552821677966e-06,
"loss": 1.1893,
"step": 511
},
{
"epoch": 1.7533578856152512,
"grad_norm": 0.1533203125,
"learning_rate": 7.217117198633561e-06,
"loss": 1.1413,
"step": 512
},
{
"epoch": 1.756824090121317,
"grad_norm": 0.1650390625,
"learning_rate": 6.995991322825191e-06,
"loss": 1.1527,
"step": 513
},
{
"epoch": 1.760290294627383,
"grad_norm": 0.13671875,
"learning_rate": 6.778183082246148e-06,
"loss": 1.0925,
"step": 514
},
{
"epoch": 1.763756499133449,
"grad_norm": 0.169921875,
"learning_rate": 6.563700246543159e-06,
"loss": 1.1325,
"step": 515
},
{
"epoch": 1.7672227036395147,
"grad_norm": 0.1572265625,
"learning_rate": 6.3525504667392595e-06,
"loss": 1.1153,
"step": 516
},
{
"epoch": 1.7706889081455806,
"grad_norm": 0.1572265625,
"learning_rate": 6.14474127496083e-06,
"loss": 1.1237,
"step": 517
},
{
"epoch": 1.7741551126516466,
"grad_norm": 0.1396484375,
"learning_rate": 5.940280084168947e-06,
"loss": 1.1277,
"step": 518
},
{
"epoch": 1.7776213171577124,
"grad_norm": 0.1572265625,
"learning_rate": 5.739174187894925e-06,
"loss": 1.0155,
"step": 519
},
{
"epoch": 1.7810875216637783,
"grad_norm": 0.1611328125,
"learning_rate": 5.541430759980138e-06,
"loss": 1.1192,
"step": 520
},
{
"epoch": 1.784553726169844,
"grad_norm": 0.15625,
"learning_rate": 5.3470568543201314e-06,
"loss": 1.1155,
"step": 521
},
{
"epoch": 1.78801993067591,
"grad_norm": 0.1376953125,
"learning_rate": 5.1560594046130115e-06,
"loss": 1.0493,
"step": 522
},
{
"epoch": 1.7914861351819757,
"grad_norm": 0.1513671875,
"learning_rate": 4.968445224112051e-06,
"loss": 1.0978,
"step": 523
},
{
"epoch": 1.7949523396880416,
"grad_norm": 0.1552734375,
"learning_rate": 4.784221005382705e-06,
"loss": 1.1046,
"step": 524
},
{
"epoch": 1.7984185441941074,
"grad_norm": 0.15625,
"learning_rate": 4.603393320063831e-06,
"loss": 1.1501,
"step": 525
},
{
"epoch": 1.8018847487001732,
"grad_norm": 0.15625,
"learning_rate": 4.425968618633292e-06,
"loss": 1.1305,
"step": 526
},
{
"epoch": 1.805350953206239,
"grad_norm": 0.171875,
"learning_rate": 4.251953230177852e-06,
"loss": 1.1878,
"step": 527
},
{
"epoch": 1.808817157712305,
"grad_norm": 0.171875,
"learning_rate": 4.081353362167406e-06,
"loss": 1.0561,
"step": 528
},
{
"epoch": 1.8122833622183707,
"grad_norm": 0.162109375,
"learning_rate": 3.91417510023353e-06,
"loss": 1.1446,
"step": 529
},
{
"epoch": 1.8157495667244368,
"grad_norm": 0.189453125,
"learning_rate": 3.750424407952402e-06,
"loss": 1.1122,
"step": 530
},
{
"epoch": 1.8192157712305026,
"grad_norm": 0.166015625,
"learning_rate": 3.5901071266320805e-06,
"loss": 1.1353,
"step": 531
},
{
"epoch": 1.8226819757365684,
"grad_norm": 0.1572265625,
"learning_rate": 3.4332289751041526e-06,
"loss": 1.1133,
"step": 532
},
{
"epoch": 1.8261481802426345,
"grad_norm": 0.14453125,
"learning_rate": 3.2797955495196485e-06,
"loss": 1.0818,
"step": 533
},
{
"epoch": 1.8296143847487003,
"grad_norm": 0.2373046875,
"learning_rate": 3.129812323149528e-06,
"loss": 1.0962,
"step": 534
},
{
"epoch": 1.8330805892547661,
"grad_norm": 0.1728515625,
"learning_rate": 2.983284646189377e-06,
"loss": 1.1154,
"step": 535
},
{
"epoch": 1.836546793760832,
"grad_norm": 0.1611328125,
"learning_rate": 2.8402177455685296e-06,
"loss": 1.1198,
"step": 536
},
{
"epoch": 1.8400129982668978,
"grad_norm": 0.169921875,
"learning_rate": 2.7006167247636825e-06,
"loss": 1.0789,
"step": 537
},
{
"epoch": 1.8434792027729636,
"grad_norm": 0.1357421875,
"learning_rate": 2.564486563616786e-06,
"loss": 1.0876,
"step": 538
},
{
"epoch": 1.8469454072790294,
"grad_norm": 0.1435546875,
"learning_rate": 2.4318321181574287e-06,
"loss": 1.1282,
"step": 539
},
{
"epoch": 1.8504116117850953,
"grad_norm": 0.146484375,
"learning_rate": 2.3026581204296347e-06,
"loss": 1.136,
"step": 540
},
{
"epoch": 1.853877816291161,
"grad_norm": 0.1513671875,
"learning_rate": 2.176969178322985e-06,
"loss": 1.0921,
"step": 541
},
{
"epoch": 1.857344020797227,
"grad_norm": 0.15625,
"learning_rate": 2.0547697754083605e-06,
"loss": 1.1621,
"step": 542
},
{
"epoch": 1.8608102253032928,
"grad_norm": 0.40234375,
"learning_rate": 1.9360642707779152e-06,
"loss": 1.1306,
"step": 543
},
{
"epoch": 1.8642764298093586,
"grad_norm": 0.1455078125,
"learning_rate": 1.8208568988895558e-06,
"loss": 1.0582,
"step": 544
},
{
"epoch": 1.8677426343154246,
"grad_norm": 0.1728515625,
"learning_rate": 1.7091517694160286e-06,
"loss": 1.1457,
"step": 545
},
{
"epoch": 1.8712088388214905,
"grad_norm": 0.189453125,
"learning_rate": 1.6009528670981711e-06,
"loss": 1.1381,
"step": 546
},
{
"epoch": 1.8746750433275563,
"grad_norm": 0.1455078125,
"learning_rate": 1.4962640516028248e-06,
"loss": 1.1462,
"step": 547
},
{
"epoch": 1.8781412478336221,
"grad_norm": 0.1611328125,
"learning_rate": 1.3950890573852126e-06,
"loss": 1.1267,
"step": 548
},
{
"epoch": 1.8816074523396882,
"grad_norm": 0.251953125,
"learning_rate": 1.297431493555612e-06,
"loss": 1.1547,
"step": 549
},
{
"epoch": 1.885073656845754,
"grad_norm": 0.1962890625,
"learning_rate": 1.2032948437506576e-06,
"loss": 1.1553,
"step": 550
},
{
"epoch": 1.8885398613518198,
"grad_norm": 0.1669921875,
"learning_rate": 1.1126824660091516e-06,
"loss": 1.1248,
"step": 551
},
{
"epoch": 1.8920060658578857,
"grad_norm": 0.142578125,
"learning_rate": 1.0255975926521166e-06,
"loss": 1.0748,
"step": 552
},
{
"epoch": 1.8954722703639515,
"grad_norm": 0.1650390625,
"learning_rate": 9.420433301676634e-07,
"loss": 1.0636,
"step": 553
},
{
"epoch": 1.8989384748700173,
"grad_norm": 0.1513671875,
"learning_rate": 8.620226591000479e-07,
"loss": 1.0486,
"step": 554
},
{
"epoch": 1.9024046793760832,
"grad_norm": 0.1435546875,
"learning_rate": 7.855384339433891e-07,
"loss": 1.1151,
"step": 555
},
{
"epoch": 1.905870883882149,
"grad_norm": 0.1474609375,
"learning_rate": 7.125933830398945e-07,
"loss": 1.1077,
"step": 556
},
{
"epoch": 1.9093370883882148,
"grad_norm": 0.142578125,
"learning_rate": 6.431901084824499e-07,
"loss": 1.0966,
"step": 557
},
{
"epoch": 1.9128032928942806,
"grad_norm": 0.15234375,
"learning_rate": 5.773310860218373e-07,
"loss": 1.0981,
"step": 558
},
{
"epoch": 1.9162694974003465,
"grad_norm": 0.1572265625,
"learning_rate": 5.150186649784728e-07,
"loss": 1.0728,
"step": 559
},
{
"epoch": 1.9197357019064125,
"grad_norm": 0.1455078125,
"learning_rate": 4.562550681584954e-07,
"loss": 1.0585,
"step": 560
},
{
"epoch": 1.9232019064124783,
"grad_norm": 0.21875,
"learning_rate": 4.0104239177454206e-07,
"loss": 1.1047,
"step": 561
},
{
"epoch": 1.9266681109185442,
"grad_norm": 0.1435546875,
"learning_rate": 3.4938260537098476e-07,
"loss": 1.1022,
"step": 562
},
{
"epoch": 1.93013431542461,
"grad_norm": 0.1533203125,
"learning_rate": 3.0127755175362037e-07,
"loss": 1.1819,
"step": 563
},
{
"epoch": 1.933600519930676,
"grad_norm": 0.150390625,
"learning_rate": 2.567289469239786e-07,
"loss": 1.0916,
"step": 564
},
{
"epoch": 1.9370667244367419,
"grad_norm": 0.32421875,
"learning_rate": 2.1573838001807123e-07,
"loss": 1.1052,
"step": 565
},
{
"epoch": 1.9405329289428077,
"grad_norm": 0.166015625,
"learning_rate": 1.7830731324977036e-07,
"loss": 1.1111,
"step": 566
},
{
"epoch": 1.9439991334488735,
"grad_norm": 0.1591796875,
"learning_rate": 1.4443708185853943e-07,
"loss": 1.1349,
"step": 567
},
{
"epoch": 1.9474653379549394,
"grad_norm": 0.1337890625,
"learning_rate": 1.1412889406192673e-07,
"loss": 1.0895,
"step": 568
},
{
"epoch": 1.9509315424610052,
"grad_norm": 0.15234375,
"learning_rate": 8.738383101235537e-08,
"loss": 1.1197,
"step": 569
},
{
"epoch": 1.954397746967071,
"grad_norm": 0.1728515625,
"learning_rate": 6.420284675865418e-08,
"loss": 1.1312,
"step": 570
},
{
"epoch": 1.9578639514731369,
"grad_norm": 0.380859375,
"learning_rate": 4.458676821194052e-08,
"loss": 1.0988,
"step": 571
},
{
"epoch": 1.9613301559792027,
"grad_norm": 0.1650390625,
"learning_rate": 2.853629511617717e-08,
"loss": 1.0989,
"step": 572
},
{
"epoch": 1.9647963604852685,
"grad_norm": 0.205078125,
"learning_rate": 1.6052000023192292e-08,
"loss": 1.1366,
"step": 573
},
{
"epoch": 1.9682625649913343,
"grad_norm": 0.1572265625,
"learning_rate": 7.134328272240254e-09,
"loss": 1.0905,
"step": 574
},
{
"epoch": 1.9717287694974004,
"grad_norm": 0.1416015625,
"learning_rate": 1.7835979741698261e-09,
"loss": 1.1114,
"step": 575
},
{
"epoch": 1.9751949740034662,
"grad_norm": 0.59765625,
"learning_rate": 0.0,
"loss": 1.1027,
"step": 576
}
],
"logging_steps": 1,
"max_steps": 576,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 288,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.639351900105605e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}