{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9992266047950503,
"eval_steps": 243,
"global_step": 969,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.08921732753515244,
"learning_rate": 2e-05,
"loss": 0.8982,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 0.910159707069397,
"eval_runtime": 111.5727,
"eval_samples_per_second": 89.072,
"eval_steps_per_second": 22.272,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 0.07926193624734879,
"learning_rate": 4e-05,
"loss": 0.8904,
"step": 2
},
{
"epoch": 0.0,
"grad_norm": 0.09000936150550842,
"learning_rate": 6e-05,
"loss": 0.8817,
"step": 3
},
{
"epoch": 0.0,
"grad_norm": 0.08947216719388962,
"learning_rate": 8e-05,
"loss": 0.8865,
"step": 4
},
{
"epoch": 0.01,
"grad_norm": 0.09538354724645615,
"learning_rate": 0.0001,
"loss": 0.7988,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 0.11288489401340485,
"learning_rate": 0.00012,
"loss": 0.8345,
"step": 6
},
{
"epoch": 0.01,
"grad_norm": 0.08290982991456985,
"learning_rate": 0.00014,
"loss": 0.8137,
"step": 7
},
{
"epoch": 0.01,
"grad_norm": 0.06841357797384262,
"learning_rate": 0.00016,
"loss": 0.7308,
"step": 8
},
{
"epoch": 0.01,
"grad_norm": 0.07010111957788467,
"learning_rate": 0.00018,
"loss": 0.8572,
"step": 9
},
{
"epoch": 0.01,
"grad_norm": 0.07711824029684067,
"learning_rate": 0.0002,
"loss": 0.7942,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 0.06440314650535583,
"learning_rate": 0.00019999994120062405,
"loss": 0.7049,
"step": 11
},
{
"epoch": 0.01,
"grad_norm": 0.0699019655585289,
"learning_rate": 0.0001999997648025654,
"loss": 0.8176,
"step": 12
},
{
"epoch": 0.01,
"grad_norm": 0.07182443886995316,
"learning_rate": 0.00019999947080603144,
"loss": 0.8583,
"step": 13
},
{
"epoch": 0.01,
"grad_norm": 0.057579606771469116,
"learning_rate": 0.00019999905921136795,
"loss": 0.8021,
"step": 14
},
{
"epoch": 0.02,
"grad_norm": 0.051665112376213074,
"learning_rate": 0.0001999985300190589,
"loss": 0.7698,
"step": 15
},
{
"epoch": 0.02,
"grad_norm": 0.05774515122175217,
"learning_rate": 0.00019999788322972668,
"loss": 0.7913,
"step": 16
},
{
"epoch": 0.02,
"grad_norm": 0.054730940610170364,
"learning_rate": 0.0001999971188441319,
"loss": 0.7205,
"step": 17
},
{
"epoch": 0.02,
"grad_norm": 0.05652638524770737,
"learning_rate": 0.00019999623686317342,
"loss": 0.7844,
"step": 18
},
{
"epoch": 0.02,
"grad_norm": 0.05296637862920761,
"learning_rate": 0.00019999523728788846,
"loss": 0.7022,
"step": 19
},
{
"epoch": 0.02,
"grad_norm": 0.05956929177045822,
"learning_rate": 0.00019999412011945252,
"loss": 0.7541,
"step": 20
},
{
"epoch": 0.02,
"grad_norm": 0.050489749759435654,
"learning_rate": 0.00019999288535917938,
"loss": 0.7155,
"step": 21
},
{
"epoch": 0.02,
"grad_norm": 0.048119645565748215,
"learning_rate": 0.00019999153300852106,
"loss": 0.757,
"step": 22
},
{
"epoch": 0.02,
"grad_norm": 0.050988584756851196,
"learning_rate": 0.00019999006306906797,
"loss": 0.7282,
"step": 23
},
{
"epoch": 0.02,
"grad_norm": 0.04979432374238968,
"learning_rate": 0.00019998847554254868,
"loss": 0.79,
"step": 24
},
{
"epoch": 0.03,
"grad_norm": 0.04835788160562515,
"learning_rate": 0.00019998677043083017,
"loss": 0.7097,
"step": 25
},
{
"epoch": 0.03,
"grad_norm": 0.05280015245079994,
"learning_rate": 0.00019998494773591758,
"loss": 0.8246,
"step": 26
},
{
"epoch": 0.03,
"grad_norm": 0.05030938982963562,
"learning_rate": 0.00019998300745995438,
"loss": 0.7865,
"step": 27
},
{
"epoch": 0.03,
"grad_norm": 0.050140462815761566,
"learning_rate": 0.00019998094960522233,
"loss": 0.7679,
"step": 28
},
{
"epoch": 0.03,
"grad_norm": 0.053974639624357224,
"learning_rate": 0.00019997877417414142,
"loss": 0.7476,
"step": 29
},
{
"epoch": 0.03,
"grad_norm": 0.048135802149772644,
"learning_rate": 0.00019997648116926994,
"loss": 0.7894,
"step": 30
},
{
"epoch": 0.03,
"grad_norm": 0.05285784229636192,
"learning_rate": 0.00019997407059330443,
"loss": 0.7967,
"step": 31
},
{
"epoch": 0.03,
"grad_norm": 0.06759033352136612,
"learning_rate": 0.00019997154244907973,
"loss": 0.8675,
"step": 32
},
{
"epoch": 0.03,
"grad_norm": 0.05007735639810562,
"learning_rate": 0.00019996889673956885,
"loss": 0.8439,
"step": 33
},
{
"epoch": 0.04,
"grad_norm": 0.044428203254938126,
"learning_rate": 0.00019996613346788316,
"loss": 0.7767,
"step": 34
},
{
"epoch": 0.04,
"grad_norm": 0.04795186221599579,
"learning_rate": 0.00019996325263727218,
"loss": 0.7618,
"step": 35
},
{
"epoch": 0.04,
"grad_norm": 0.045297641307115555,
"learning_rate": 0.00019996025425112377,
"loss": 0.7379,
"step": 36
},
{
"epoch": 0.04,
"grad_norm": 0.04418836906552315,
"learning_rate": 0.000199957138312964,
"loss": 0.7092,
"step": 37
},
{
"epoch": 0.04,
"grad_norm": 0.05495867133140564,
"learning_rate": 0.0001999539048264572,
"loss": 0.7708,
"step": 38
},
{
"epoch": 0.04,
"grad_norm": 0.047341588884592056,
"learning_rate": 0.0001999505537954058,
"loss": 0.7829,
"step": 39
},
{
"epoch": 0.04,
"grad_norm": 0.05130796134471893,
"learning_rate": 0.0001999470852237507,
"loss": 0.701,
"step": 40
},
{
"epoch": 0.04,
"grad_norm": 0.04843367636203766,
"learning_rate": 0.00019994349911557082,
"loss": 0.7624,
"step": 41
},
{
"epoch": 0.04,
"grad_norm": 0.05179371312260628,
"learning_rate": 0.0001999397954750834,
"loss": 0.7432,
"step": 42
},
{
"epoch": 0.04,
"grad_norm": 0.052478544414043427,
"learning_rate": 0.00019993597430664387,
"loss": 0.7313,
"step": 43
},
{
"epoch": 0.05,
"grad_norm": 0.05324641987681389,
"learning_rate": 0.00019993203561474588,
"loss": 0.8092,
"step": 44
},
{
"epoch": 0.05,
"grad_norm": 0.051216114312410355,
"learning_rate": 0.0001999279794040213,
"loss": 0.6828,
"step": 45
},
{
"epoch": 0.05,
"grad_norm": 0.0454455241560936,
"learning_rate": 0.0001999238056792401,
"loss": 0.7963,
"step": 46
},
{
"epoch": 0.05,
"grad_norm": 0.04751123487949371,
"learning_rate": 0.00019991951444531066,
"loss": 0.8264,
"step": 47
},
{
"epoch": 0.05,
"grad_norm": 0.05125482380390167,
"learning_rate": 0.00019991510570727933,
"loss": 0.7282,
"step": 48
},
{
"epoch": 0.05,
"grad_norm": 0.05229455232620239,
"learning_rate": 0.00019991057947033074,
"loss": 0.7409,
"step": 49
},
{
"epoch": 0.05,
"grad_norm": 0.046746984124183655,
"learning_rate": 0.00019990593573978772,
"loss": 0.8096,
"step": 50
},
{
"epoch": 0.05,
"grad_norm": 0.0461801253259182,
"learning_rate": 0.00019990117452111124,
"loss": 0.7995,
"step": 51
},
{
"epoch": 0.05,
"grad_norm": 0.04826509580016136,
"learning_rate": 0.00019989629581990037,
"loss": 0.795,
"step": 52
},
{
"epoch": 0.05,
"grad_norm": 0.04601113870739937,
"learning_rate": 0.00019989129964189246,
"loss": 0.7805,
"step": 53
},
{
"epoch": 0.06,
"grad_norm": 0.05406435951590538,
"learning_rate": 0.00019988618599296295,
"loss": 0.7585,
"step": 54
},
{
"epoch": 0.06,
"grad_norm": 0.054005216807127,
"learning_rate": 0.00019988095487912539,
"loss": 0.7165,
"step": 55
},
{
"epoch": 0.06,
"grad_norm": 0.04769807681441307,
"learning_rate": 0.00019987560630653156,
"loss": 0.7695,
"step": 56
},
{
"epoch": 0.06,
"grad_norm": 0.04815264418721199,
"learning_rate": 0.00019987014028147125,
"loss": 0.6944,
"step": 57
},
{
"epoch": 0.06,
"grad_norm": 0.051092248409986496,
"learning_rate": 0.00019986455681037248,
"loss": 0.7619,
"step": 58
},
{
"epoch": 0.06,
"grad_norm": 0.048955272883176804,
"learning_rate": 0.00019985885589980136,
"loss": 0.7827,
"step": 59
},
{
"epoch": 0.06,
"grad_norm": 0.049241162836551666,
"learning_rate": 0.00019985303755646206,
"loss": 0.6925,
"step": 60
},
{
"epoch": 0.06,
"grad_norm": 0.05324079468846321,
"learning_rate": 0.00019984710178719685,
"loss": 0.7269,
"step": 61
},
{
"epoch": 0.06,
"grad_norm": 0.05057024210691452,
"learning_rate": 0.0001998410485989862,
"loss": 0.7598,
"step": 62
},
{
"epoch": 0.06,
"grad_norm": 0.05710586905479431,
"learning_rate": 0.0001998348779989485,
"loss": 0.8153,
"step": 63
},
{
"epoch": 0.07,
"grad_norm": 0.05069264769554138,
"learning_rate": 0.00019982858999434034,
"loss": 0.6319,
"step": 64
},
{
"epoch": 0.07,
"grad_norm": 0.0459558442234993,
"learning_rate": 0.00019982218459255635,
"loss": 0.7189,
"step": 65
},
{
"epoch": 0.07,
"grad_norm": 0.054699912667274475,
"learning_rate": 0.00019981566180112917,
"loss": 0.7845,
"step": 66
},
{
"epoch": 0.07,
"grad_norm": 0.045664116740226746,
"learning_rate": 0.0001998090216277295,
"loss": 0.7308,
"step": 67
},
{
"epoch": 0.07,
"grad_norm": 0.046552181243896484,
"learning_rate": 0.0001998022640801662,
"loss": 0.6392,
"step": 68
},
{
"epoch": 0.07,
"grad_norm": 0.048938997089862823,
"learning_rate": 0.00019979538916638597,
"loss": 0.7591,
"step": 69
},
{
"epoch": 0.07,
"grad_norm": 0.051963839679956436,
"learning_rate": 0.00019978839689447365,
"loss": 0.7054,
"step": 70
},
{
"epoch": 0.07,
"grad_norm": 0.05242505297064781,
"learning_rate": 0.00019978128727265207,
"loss": 0.7467,
"step": 71
},
{
"epoch": 0.07,
"grad_norm": 0.04921518638730049,
"learning_rate": 0.00019977406030928204,
"loss": 0.7793,
"step": 72
},
{
"epoch": 0.08,
"grad_norm": 0.052262112498283386,
"learning_rate": 0.00019976671601286237,
"loss": 0.7193,
"step": 73
},
{
"epoch": 0.08,
"grad_norm": 0.053219135850667953,
"learning_rate": 0.0001997592543920299,
"loss": 0.7483,
"step": 74
},
{
"epoch": 0.08,
"grad_norm": 0.049095865339040756,
"learning_rate": 0.00019975167545555942,
"loss": 0.7544,
"step": 75
},
{
"epoch": 0.08,
"grad_norm": 0.052104122936725616,
"learning_rate": 0.0001997439792123636,
"loss": 0.7227,
"step": 76
},
{
"epoch": 0.08,
"grad_norm": 0.05156683921813965,
"learning_rate": 0.00019973616567149313,
"loss": 0.7752,
"step": 77
},
{
"epoch": 0.08,
"grad_norm": 0.050344448536634445,
"learning_rate": 0.00019972823484213669,
"loss": 0.7515,
"step": 78
},
{
"epoch": 0.08,
"grad_norm": 0.04634309560060501,
"learning_rate": 0.00019972018673362077,
"loss": 0.7709,
"step": 79
},
{
"epoch": 0.08,
"grad_norm": 0.05174633115530014,
"learning_rate": 0.00019971202135540993,
"loss": 0.7293,
"step": 80
},
{
"epoch": 0.08,
"grad_norm": 0.05256710946559906,
"learning_rate": 0.00019970373871710647,
"loss": 0.7085,
"step": 81
},
{
"epoch": 0.08,
"grad_norm": 0.07102134823799133,
"learning_rate": 0.00019969533882845076,
"loss": 0.668,
"step": 82
},
{
"epoch": 0.09,
"grad_norm": 0.05783531814813614,
"learning_rate": 0.00019968682169932083,
"loss": 0.7066,
"step": 83
},
{
"epoch": 0.09,
"grad_norm": 0.04908255860209465,
"learning_rate": 0.00019967818733973287,
"loss": 0.7456,
"step": 84
},
{
"epoch": 0.09,
"grad_norm": 0.04938043653964996,
"learning_rate": 0.00019966943575984068,
"loss": 0.7156,
"step": 85
},
{
"epoch": 0.09,
"grad_norm": 0.04811069369316101,
"learning_rate": 0.000199660566969936,
"loss": 0.7725,
"step": 86
},
{
"epoch": 0.09,
"grad_norm": 0.05829324200749397,
"learning_rate": 0.0001996515809804485,
"loss": 0.7384,
"step": 87
},
{
"epoch": 0.09,
"grad_norm": 0.05200457572937012,
"learning_rate": 0.00019964247780194548,
"loss": 0.7739,
"step": 88
},
{
"epoch": 0.09,
"grad_norm": 0.05518084019422531,
"learning_rate": 0.00019963325744513227,
"loss": 0.6794,
"step": 89
},
{
"epoch": 0.09,
"grad_norm": 0.07461239397525787,
"learning_rate": 0.00019962391992085185,
"loss": 0.7231,
"step": 90
},
{
"epoch": 0.09,
"grad_norm": 0.04882181063294411,
"learning_rate": 0.00019961446524008504,
"loss": 0.7539,
"step": 91
},
{
"epoch": 0.09,
"grad_norm": 0.05256763473153114,
"learning_rate": 0.0001996048934139504,
"loss": 0.7796,
"step": 92
},
{
"epoch": 0.1,
"grad_norm": 0.04832598939538002,
"learning_rate": 0.0001995952044537043,
"loss": 0.8047,
"step": 93
},
{
"epoch": 0.1,
"grad_norm": 0.05217672884464264,
"learning_rate": 0.00019958539837074084,
"loss": 0.7766,
"step": 94
},
{
"epoch": 0.1,
"grad_norm": 0.049672115594148636,
"learning_rate": 0.00019957547517659183,
"loss": 0.7328,
"step": 95
},
{
"epoch": 0.1,
"grad_norm": 0.0518164187669754,
"learning_rate": 0.00019956543488292686,
"loss": 0.6766,
"step": 96
},
{
"epoch": 0.1,
"grad_norm": 0.04681951552629471,
"learning_rate": 0.00019955527750155315,
"loss": 0.7647,
"step": 97
},
{
"epoch": 0.1,
"grad_norm": 0.048327744007110596,
"learning_rate": 0.0001995450030444157,
"loss": 0.7428,
"step": 98
},
{
"epoch": 0.1,
"grad_norm": 0.05234162509441376,
"learning_rate": 0.00019953461152359708,
"loss": 0.7551,
"step": 99
},
{
"epoch": 0.1,
"grad_norm": 0.04491112753748894,
"learning_rate": 0.0001995241029513176,
"loss": 0.6876,
"step": 100
},
{
"epoch": 0.1,
"grad_norm": 0.050490718334913254,
"learning_rate": 0.00019951347733993528,
"loss": 0.832,
"step": 101
},
{
"epoch": 0.11,
"grad_norm": 0.05716025456786156,
"learning_rate": 0.00019950273470194566,
"loss": 0.7393,
"step": 102
},
{
"epoch": 0.11,
"grad_norm": 0.048837609589099884,
"learning_rate": 0.00019949187504998193,
"loss": 0.7305,
"step": 103
},
{
"epoch": 0.11,
"grad_norm": 0.04992825165390968,
"learning_rate": 0.00019948089839681493,
"loss": 0.7371,
"step": 104
},
{
"epoch": 0.11,
"grad_norm": 0.05300760269165039,
"learning_rate": 0.00019946980475535307,
"loss": 0.7529,
"step": 105
},
{
"epoch": 0.11,
"grad_norm": 0.049852970987558365,
"learning_rate": 0.0001994585941386423,
"loss": 0.7111,
"step": 106
},
{
"epoch": 0.11,
"grad_norm": 0.05310487747192383,
"learning_rate": 0.0001994472665598662,
"loss": 0.6912,
"step": 107
},
{
"epoch": 0.11,
"grad_norm": 0.05248330160975456,
"learning_rate": 0.00019943582203234584,
"loss": 0.7445,
"step": 108
},
{
"epoch": 0.11,
"grad_norm": 0.05639020353555679,
"learning_rate": 0.00019942426056953987,
"loss": 0.7556,
"step": 109
},
{
"epoch": 0.11,
"grad_norm": 0.050534311681985855,
"learning_rate": 0.00019941258218504436,
"loss": 0.7028,
"step": 110
},
{
"epoch": 0.11,
"grad_norm": 0.05137235298752785,
"learning_rate": 0.00019940078689259305,
"loss": 0.7306,
"step": 111
},
{
"epoch": 0.12,
"grad_norm": 0.05214603245258331,
"learning_rate": 0.00019938887470605698,
"loss": 0.7153,
"step": 112
},
{
"epoch": 0.12,
"grad_norm": 0.05015432462096214,
"learning_rate": 0.00019937684563944475,
"loss": 0.7393,
"step": 113
},
{
"epoch": 0.12,
"grad_norm": 0.05170932784676552,
"learning_rate": 0.0001993646997069024,
"loss": 0.7947,
"step": 114
},
{
"epoch": 0.12,
"grad_norm": 0.050847794860601425,
"learning_rate": 0.00019935243692271337,
"loss": 0.7787,
"step": 115
},
{
"epoch": 0.12,
"grad_norm": 0.04788447543978691,
"learning_rate": 0.00019934005730129858,
"loss": 0.714,
"step": 116
},
{
"epoch": 0.12,
"grad_norm": 0.05097344517707825,
"learning_rate": 0.00019932756085721629,
"loss": 0.8083,
"step": 117
},
{
"epoch": 0.12,
"grad_norm": 0.04976595938205719,
"learning_rate": 0.00019931494760516217,
"loss": 0.8157,
"step": 118
},
{
"epoch": 0.12,
"grad_norm": 0.04821658134460449,
"learning_rate": 0.0001993022175599692,
"loss": 0.6772,
"step": 119
},
{
"epoch": 0.12,
"grad_norm": 0.054969336837530136,
"learning_rate": 0.00019928937073660782,
"loss": 0.7019,
"step": 120
},
{
"epoch": 0.12,
"grad_norm": 0.05310753360390663,
"learning_rate": 0.0001992764071501857,
"loss": 0.7266,
"step": 121
},
{
"epoch": 0.13,
"grad_norm": 0.04759340360760689,
"learning_rate": 0.00019926332681594787,
"loss": 0.7549,
"step": 122
},
{
"epoch": 0.13,
"grad_norm": 0.05405697599053383,
"learning_rate": 0.00019925012974927664,
"loss": 0.7416,
"step": 123
},
{
"epoch": 0.13,
"grad_norm": 0.07087159901857376,
"learning_rate": 0.0001992368159656916,
"loss": 0.7354,
"step": 124
},
{
"epoch": 0.13,
"grad_norm": 0.05469256639480591,
"learning_rate": 0.00019922338548084955,
"loss": 0.7082,
"step": 125
},
{
"epoch": 0.13,
"grad_norm": 0.05295870825648308,
"learning_rate": 0.00019920983831054462,
"loss": 0.7852,
"step": 126
},
{
"epoch": 0.13,
"grad_norm": 0.05017589032649994,
"learning_rate": 0.0001991961744707081,
"loss": 0.7362,
"step": 127
},
{
"epoch": 0.13,
"grad_norm": 0.052979759871959686,
"learning_rate": 0.0001991823939774085,
"loss": 0.7946,
"step": 128
},
{
"epoch": 0.13,
"grad_norm": 0.053128018975257874,
"learning_rate": 0.00019916849684685148,
"loss": 0.7697,
"step": 129
},
{
"epoch": 0.13,
"grad_norm": 0.05207785964012146,
"learning_rate": 0.00019915448309537994,
"loss": 0.7327,
"step": 130
},
{
"epoch": 0.14,
"grad_norm": 0.05230351909995079,
"learning_rate": 0.00019914035273947383,
"loss": 0.73,
"step": 131
},
{
"epoch": 0.14,
"grad_norm": 0.049084827303886414,
"learning_rate": 0.0001991261057957503,
"loss": 0.7928,
"step": 132
},
{
"epoch": 0.14,
"grad_norm": 0.05596386641263962,
"learning_rate": 0.00019911174228096355,
"loss": 0.7639,
"step": 133
},
{
"epoch": 0.14,
"grad_norm": 0.052596963942050934,
"learning_rate": 0.00019909726221200495,
"loss": 0.6955,
"step": 134
},
{
"epoch": 0.14,
"grad_norm": 0.05625372752547264,
"learning_rate": 0.00019908266560590283,
"loss": 0.7469,
"step": 135
},
{
"epoch": 0.14,
"grad_norm": 0.05166260153055191,
"learning_rate": 0.00019906795247982264,
"loss": 0.7589,
"step": 136
},
{
"epoch": 0.14,
"grad_norm": 0.052134498953819275,
"learning_rate": 0.0001990531228510668,
"loss": 0.7128,
"step": 137
},
{
"epoch": 0.14,
"grad_norm": 0.051448870450258255,
"learning_rate": 0.0001990381767370748,
"loss": 0.7148,
"step": 138
},
{
"epoch": 0.14,
"grad_norm": 0.053590189665555954,
"learning_rate": 0.00019902311415542307,
"loss": 0.7676,
"step": 139
},
{
"epoch": 0.14,
"grad_norm": 0.05264313891530037,
"learning_rate": 0.00019900793512382504,
"loss": 0.7393,
"step": 140
},
{
"epoch": 0.15,
"grad_norm": 0.05209501087665558,
"learning_rate": 0.00019899263966013106,
"loss": 0.7095,
"step": 141
},
{
"epoch": 0.15,
"grad_norm": 0.050406355410814285,
"learning_rate": 0.00019897722778232838,
"loss": 0.7533,
"step": 142
},
{
"epoch": 0.15,
"grad_norm": 0.053870897740125656,
"learning_rate": 0.00019896169950854117,
"loss": 0.7563,
"step": 143
},
{
"epoch": 0.15,
"grad_norm": 0.06269948929548264,
"learning_rate": 0.00019894605485703051,
"loss": 0.795,
"step": 144
},
{
"epoch": 0.15,
"grad_norm": 0.05105578899383545,
"learning_rate": 0.00019893029384619428,
"loss": 0.7268,
"step": 145
},
{
"epoch": 0.15,
"grad_norm": 0.049749474972486496,
"learning_rate": 0.0001989144164945673,
"loss": 0.6783,
"step": 146
},
{
"epoch": 0.15,
"grad_norm": 0.051559630781412125,
"learning_rate": 0.00019889842282082109,
"loss": 0.7204,
"step": 147
},
{
"epoch": 0.15,
"grad_norm": 0.051908280700445175,
"learning_rate": 0.00019888231284376402,
"loss": 0.7048,
"step": 148
},
{
"epoch": 0.15,
"grad_norm": 0.05648699402809143,
"learning_rate": 0.0001988660865823412,
"loss": 0.7145,
"step": 149
},
{
"epoch": 0.15,
"grad_norm": 0.05817189812660217,
"learning_rate": 0.00019884974405563454,
"loss": 0.6883,
"step": 150
},
{
"epoch": 0.16,
"grad_norm": 0.05215545743703842,
"learning_rate": 0.0001988332852828626,
"loss": 0.7226,
"step": 151
},
{
"epoch": 0.16,
"grad_norm": 0.05281899496912956,
"learning_rate": 0.0001988167102833808,
"loss": 0.7174,
"step": 152
},
{
"epoch": 0.16,
"grad_norm": 0.05761328712105751,
"learning_rate": 0.00019880001907668102,
"loss": 0.7779,
"step": 153
},
{
"epoch": 0.16,
"grad_norm": 0.05430774390697479,
"learning_rate": 0.00019878321168239195,
"loss": 0.7061,
"step": 154
},
{
"epoch": 0.16,
"grad_norm": 0.05135492607951164,
"learning_rate": 0.0001987662881202789,
"loss": 0.7317,
"step": 155
},
{
"epoch": 0.16,
"grad_norm": 0.05098530277609825,
"learning_rate": 0.00019874924841024373,
"loss": 0.7592,
"step": 156
},
{
"epoch": 0.16,
"grad_norm": 0.051871709525585175,
"learning_rate": 0.00019873209257232494,
"loss": 0.8104,
"step": 157
},
{
"epoch": 0.16,
"grad_norm": 0.0578281544148922,
"learning_rate": 0.0001987148206266976,
"loss": 0.7072,
"step": 158
},
{
"epoch": 0.16,
"grad_norm": 0.05245187506079674,
"learning_rate": 0.00019869743259367327,
"loss": 0.7621,
"step": 159
},
{
"epoch": 0.16,
"grad_norm": 0.05832192674279213,
"learning_rate": 0.00019867992849370008,
"loss": 0.7617,
"step": 160
},
{
"epoch": 0.17,
"grad_norm": 0.0534343346953392,
"learning_rate": 0.00019866230834736262,
"loss": 0.7869,
"step": 161
},
{
"epoch": 0.17,
"grad_norm": 0.061366647481918335,
"learning_rate": 0.00019864457217538196,
"loss": 0.7412,
"step": 162
},
{
"epoch": 0.17,
"grad_norm": 0.05365344136953354,
"learning_rate": 0.00019862671999861567,
"loss": 0.6683,
"step": 163
},
{
"epoch": 0.17,
"grad_norm": 0.05422106385231018,
"learning_rate": 0.0001986087518380576,
"loss": 0.7618,
"step": 164
},
{
"epoch": 0.17,
"grad_norm": 0.05549204722046852,
"learning_rate": 0.00019859066771483815,
"loss": 0.7674,
"step": 165
},
{
"epoch": 0.17,
"grad_norm": 0.053906746208667755,
"learning_rate": 0.000198572467650224,
"loss": 0.8516,
"step": 166
},
{
"epoch": 0.17,
"grad_norm": 0.05335883051156998,
"learning_rate": 0.0001985541516656182,
"loss": 0.7301,
"step": 167
},
{
"epoch": 0.17,
"grad_norm": 0.056282807141542435,
"learning_rate": 0.00019853571978256014,
"loss": 0.723,
"step": 168
},
{
"epoch": 0.17,
"grad_norm": 0.0541800893843174,
"learning_rate": 0.0001985171720227254,
"loss": 0.7342,
"step": 169
},
{
"epoch": 0.18,
"grad_norm": 0.053833167999982834,
"learning_rate": 0.00019849850840792605,
"loss": 0.742,
"step": 170
},
{
"epoch": 0.18,
"grad_norm": 0.0539311021566391,
"learning_rate": 0.00019847972896011012,
"loss": 0.733,
"step": 171
},
{
"epoch": 0.18,
"grad_norm": 0.055201999843120575,
"learning_rate": 0.00019846083370136217,
"loss": 0.7608,
"step": 172
},
{
"epoch": 0.18,
"grad_norm": 0.05684024840593338,
"learning_rate": 0.00019844182265390265,
"loss": 0.7475,
"step": 173
},
{
"epoch": 0.18,
"grad_norm": 0.05470721423625946,
"learning_rate": 0.00019842269584008837,
"loss": 0.7758,
"step": 174
},
{
"epoch": 0.18,
"grad_norm": 0.05426143482327461,
"learning_rate": 0.00019840345328241222,
"loss": 0.8154,
"step": 175
},
{
"epoch": 0.18,
"grad_norm": 0.05676954984664917,
"learning_rate": 0.00019838409500350318,
"loss": 0.6831,
"step": 176
},
{
"epoch": 0.18,
"grad_norm": 0.06125069782137871,
"learning_rate": 0.0001983646210261264,
"loss": 0.8139,
"step": 177
},
{
"epoch": 0.18,
"grad_norm": 0.055671993643045425,
"learning_rate": 0.000198345031373183,
"loss": 0.7354,
"step": 178
},
{
"epoch": 0.18,
"grad_norm": 0.05255590006709099,
"learning_rate": 0.00019832532606771017,
"loss": 0.772,
"step": 179
},
{
"epoch": 0.19,
"grad_norm": 0.05209804326295853,
"learning_rate": 0.0001983055051328811,
"loss": 0.702,
"step": 180
},
{
"epoch": 0.19,
"grad_norm": 0.051347844302654266,
"learning_rate": 0.000198285568592005,
"loss": 0.7941,
"step": 181
},
{
"epoch": 0.19,
"grad_norm": 0.049175627529621124,
"learning_rate": 0.00019826551646852692,
"loss": 0.7156,
"step": 182
},
{
"epoch": 0.19,
"grad_norm": 0.06953848898410797,
"learning_rate": 0.000198245348786028,
"loss": 0.6817,
"step": 183
},
{
"epoch": 0.19,
"grad_norm": 0.05407484620809555,
"learning_rate": 0.00019822506556822506,
"loss": 0.7818,
"step": 184
},
{
"epoch": 0.19,
"grad_norm": 0.053331997245550156,
"learning_rate": 0.00019820466683897103,
"loss": 0.721,
"step": 185
},
{
"epoch": 0.19,
"grad_norm": 0.05798467993736267,
"learning_rate": 0.0001981841526222545,
"loss": 0.7407,
"step": 186
},
{
"epoch": 0.19,
"grad_norm": 0.05653946474194527,
"learning_rate": 0.00019816352294219994,
"loss": 0.7092,
"step": 187
},
{
"epoch": 0.19,
"grad_norm": 0.05470096692442894,
"learning_rate": 0.0001981427778230676,
"loss": 0.7337,
"step": 188
},
{
"epoch": 0.19,
"grad_norm": 0.06434475630521774,
"learning_rate": 0.0001981219172892535,
"loss": 0.6977,
"step": 189
},
{
"epoch": 0.2,
"grad_norm": 0.052794575691223145,
"learning_rate": 0.00019810094136528934,
"loss": 0.7601,
"step": 190
},
{
"epoch": 0.2,
"grad_norm": 0.05482126399874687,
"learning_rate": 0.00019807985007584255,
"loss": 0.7619,
"step": 191
},
{
"epoch": 0.2,
"grad_norm": 0.06532236188650131,
"learning_rate": 0.00019805864344571626,
"loss": 0.7934,
"step": 192
},
{
"epoch": 0.2,
"grad_norm": 0.0744403600692749,
"learning_rate": 0.00019803732149984912,
"loss": 0.6666,
"step": 193
},
{
"epoch": 0.2,
"grad_norm": 0.05643633008003235,
"learning_rate": 0.00019801588426331557,
"loss": 0.7519,
"step": 194
},
{
"epoch": 0.2,
"grad_norm": 0.05338543280959129,
"learning_rate": 0.00019799433176132544,
"loss": 0.7072,
"step": 195
},
{
"epoch": 0.2,
"grad_norm": 0.05368245020508766,
"learning_rate": 0.00019797266401922427,
"loss": 0.7325,
"step": 196
},
{
"epoch": 0.2,
"grad_norm": 0.05443282797932625,
"learning_rate": 0.00019795088106249305,
"loss": 0.8166,
"step": 197
},
{
"epoch": 0.2,
"grad_norm": 0.052494440227746964,
"learning_rate": 0.00019792898291674823,
"loss": 0.7339,
"step": 198
},
{
"epoch": 0.21,
"grad_norm": 0.06454979628324509,
"learning_rate": 0.0001979069696077418,
"loss": 0.803,
"step": 199
},
{
"epoch": 0.21,
"grad_norm": 0.054398063570261,
"learning_rate": 0.00019788484116136108,
"loss": 0.7857,
"step": 200
},
{
"epoch": 0.21,
"grad_norm": 0.05177140235900879,
"learning_rate": 0.0001978625976036289,
"loss": 0.7605,
"step": 201
},
{
"epoch": 0.21,
"grad_norm": 0.05473088100552559,
"learning_rate": 0.00019784023896070337,
"loss": 0.7145,
"step": 202
},
{
"epoch": 0.21,
"grad_norm": 0.06038124859333038,
"learning_rate": 0.00019781776525887797,
"loss": 0.7154,
"step": 203
},
{
"epoch": 0.21,
"grad_norm": 0.054728008806705475,
"learning_rate": 0.00019779517652458156,
"loss": 0.7575,
"step": 204
},
{
"epoch": 0.21,
"grad_norm": 0.05367952585220337,
"learning_rate": 0.0001977724727843781,
"loss": 0.7259,
"step": 205
},
{
"epoch": 0.21,
"grad_norm": 0.056090373545885086,
"learning_rate": 0.00019774965406496703,
"loss": 0.796,
"step": 206
},
{
"epoch": 0.21,
"grad_norm": 0.056659117341041565,
"learning_rate": 0.00019772672039318278,
"loss": 0.7572,
"step": 207
},
{
"epoch": 0.21,
"grad_norm": 0.061686623841524124,
"learning_rate": 0.0001977036717959951,
"loss": 0.7514,
"step": 208
},
{
"epoch": 0.22,
"grad_norm": 0.05229324847459793,
"learning_rate": 0.00019768050830050888,
"loss": 0.7923,
"step": 209
},
{
"epoch": 0.22,
"grad_norm": 0.05652037635445595,
"learning_rate": 0.00019765722993396402,
"loss": 0.7452,
"step": 210
},
{
"epoch": 0.22,
"grad_norm": 0.0566401481628418,
"learning_rate": 0.00019763383672373566,
"loss": 0.6641,
"step": 211
},
{
"epoch": 0.22,
"grad_norm": 0.061378199607133865,
"learning_rate": 0.00019761032869733395,
"loss": 0.7042,
"step": 212
},
{
"epoch": 0.22,
"grad_norm": 0.05892565846443176,
"learning_rate": 0.00019758670588240396,
"loss": 0.7209,
"step": 213
},
{
"epoch": 0.22,
"grad_norm": 0.054732099175453186,
"learning_rate": 0.00019756296830672585,
"loss": 0.7396,
"step": 214
},
{
"epoch": 0.22,
"grad_norm": 0.053043294697999954,
"learning_rate": 0.0001975391159982147,
"loss": 0.7441,
"step": 215
},
{
"epoch": 0.22,
"grad_norm": 0.058401089161634445,
"learning_rate": 0.00019751514898492056,
"loss": 0.7553,
"step": 216
},
{
"epoch": 0.22,
"grad_norm": 0.05779936537146568,
"learning_rate": 0.0001974910672950283,
"loss": 0.6659,
"step": 217
},
{
"epoch": 0.22,
"grad_norm": 0.0589526928961277,
"learning_rate": 0.00019746687095685775,
"loss": 0.7177,
"step": 218
},
{
"epoch": 0.23,
"grad_norm": 0.05505611002445221,
"learning_rate": 0.00019744255999886345,
"loss": 0.702,
"step": 219
},
{
"epoch": 0.23,
"grad_norm": 0.053444795310497284,
"learning_rate": 0.00019741813444963476,
"loss": 0.7458,
"step": 220
},
{
"epoch": 0.23,
"grad_norm": 0.05537053942680359,
"learning_rate": 0.0001973935943378959,
"loss": 0.7178,
"step": 221
},
{
"epoch": 0.23,
"grad_norm": 0.05319499969482422,
"learning_rate": 0.00019736893969250568,
"loss": 0.6376,
"step": 222
},
{
"epoch": 0.23,
"grad_norm": 0.05316654592752457,
"learning_rate": 0.0001973441705424577,
"loss": 0.7649,
"step": 223
},
{
"epoch": 0.23,
"grad_norm": 0.06086993217468262,
"learning_rate": 0.00019731928691688006,
"loss": 0.7171,
"step": 224
},
{
"epoch": 0.23,
"grad_norm": 0.058167632669210434,
"learning_rate": 0.0001972942888450357,
"loss": 0.7636,
"step": 225
},
{
"epoch": 0.23,
"grad_norm": 0.057685092091560364,
"learning_rate": 0.00019726917635632204,
"loss": 0.7487,
"step": 226
},
{
"epoch": 0.23,
"grad_norm": 0.056136585772037506,
"learning_rate": 0.000197243949480271,
"loss": 0.7456,
"step": 227
},
{
"epoch": 0.24,
"grad_norm": 0.052869562059640884,
"learning_rate": 0.0001972186082465491,
"loss": 0.693,
"step": 228
},
{
"epoch": 0.24,
"grad_norm": 0.05286895111203194,
"learning_rate": 0.0001971931526849573,
"loss": 0.713,
"step": 229
},
{
"epoch": 0.24,
"grad_norm": 0.05643904581665993,
"learning_rate": 0.00019716758282543106,
"loss": 0.7467,
"step": 230
},
{
"epoch": 0.24,
"grad_norm": 0.05756821483373642,
"learning_rate": 0.00019714189869804018,
"loss": 0.675,
"step": 231
},
{
"epoch": 0.24,
"grad_norm": 0.05546725168824196,
"learning_rate": 0.00019711610033298888,
"loss": 0.7998,
"step": 232
},
{
"epoch": 0.24,
"grad_norm": 0.06143364682793617,
"learning_rate": 0.0001970901877606157,
"loss": 0.728,
"step": 233
},
{
"epoch": 0.24,
"grad_norm": 0.06431245058774948,
"learning_rate": 0.00019706416101139353,
"loss": 0.7068,
"step": 234
},
{
"epoch": 0.24,
"grad_norm": 0.054505374282598495,
"learning_rate": 0.0001970380201159295,
"loss": 0.6696,
"step": 235
},
{
"epoch": 0.24,
"grad_norm": 0.054913364350795746,
"learning_rate": 0.00019701176510496495,
"loss": 0.7246,
"step": 236
},
{
"epoch": 0.24,
"grad_norm": 0.06259609013795853,
"learning_rate": 0.0001969853960093755,
"loss": 0.7613,
"step": 237
},
{
"epoch": 0.25,
"grad_norm": 0.0802953839302063,
"learning_rate": 0.00019695891286017082,
"loss": 0.7843,
"step": 238
},
{
"epoch": 0.25,
"grad_norm": 0.054023925215005875,
"learning_rate": 0.00019693231568849475,
"loss": 0.7848,
"step": 239
},
{
"epoch": 0.25,
"grad_norm": 0.05291634425520897,
"learning_rate": 0.0001969056045256253,
"loss": 0.5979,
"step": 240
},
{
"epoch": 0.25,
"grad_norm": 0.052885495126247406,
"learning_rate": 0.00019687877940297442,
"loss": 0.715,
"step": 241
},
{
"epoch": 0.25,
"grad_norm": 0.054459016770124435,
"learning_rate": 0.00019685184035208813,
"loss": 0.7413,
"step": 242
},
{
"epoch": 0.25,
"grad_norm": 0.058789074420928955,
"learning_rate": 0.0001968247874046464,
"loss": 0.7637,
"step": 243
},
{
"epoch": 0.25,
"eval_loss": 0.8141337633132935,
"eval_runtime": 111.9734,
"eval_samples_per_second": 88.753,
"eval_steps_per_second": 22.193,
"step": 243
},
{
"epoch": 0.25,
"grad_norm": 0.054029788821935654,
"learning_rate": 0.0001967976205924632,
"loss": 0.7184,
"step": 244
},
{
"epoch": 0.25,
"grad_norm": 0.055858369916677475,
"learning_rate": 0.00019677033994748631,
"loss": 0.7238,
"step": 245
},
{
"epoch": 0.25,
"grad_norm": 0.05345359072089195,
"learning_rate": 0.0001967429455017975,
"loss": 0.6889,
"step": 246
},
{
"epoch": 0.25,
"grad_norm": 0.06066269427537918,
"learning_rate": 0.00019671543728761224,
"loss": 0.8698,
"step": 247
},
{
"epoch": 0.26,
"grad_norm": 0.058042511343955994,
"learning_rate": 0.00019668781533727984,
"loss": 0.6563,
"step": 248
},
{
"epoch": 0.26,
"grad_norm": 0.056662410497665405,
"learning_rate": 0.00019666007968328336,
"loss": 0.6852,
"step": 249
},
{
"epoch": 0.26,
"grad_norm": 0.05772121623158455,
"learning_rate": 0.00019663223035823964,
"loss": 0.7117,
"step": 250
},
{
"epoch": 0.26,
"grad_norm": 0.0612647645175457,
"learning_rate": 0.00019660426739489913,
"loss": 0.7662,
"step": 251
},
{
"epoch": 0.26,
"grad_norm": 0.06314199417829514,
"learning_rate": 0.00019657619082614586,
"loss": 0.7101,
"step": 252
},
{
"epoch": 0.26,
"grad_norm": 0.05658784881234169,
"learning_rate": 0.0001965480006849976,
"loss": 0.7685,
"step": 253
},
{
"epoch": 0.26,
"grad_norm": 0.05959884822368622,
"learning_rate": 0.00019651969700460557,
"loss": 0.7267,
"step": 254
},
{
"epoch": 0.26,
"grad_norm": 0.058939650654792786,
"learning_rate": 0.00019649127981825456,
"loss": 0.6825,
"step": 255
},
{
"epoch": 0.26,
"grad_norm": 0.060037851333618164,
"learning_rate": 0.00019646274915936278,
"loss": 0.7476,
"step": 256
},
{
"epoch": 0.27,
"grad_norm": 0.06029137596487999,
"learning_rate": 0.00019643410506148197,
"loss": 0.7497,
"step": 257
},
{
"epoch": 0.27,
"grad_norm": 0.05577164515852928,
"learning_rate": 0.00019640534755829725,
"loss": 0.7159,
"step": 258
},
{
"epoch": 0.27,
"grad_norm": 0.05618567392230034,
"learning_rate": 0.00019637647668362704,
"loss": 0.7257,
"step": 259
},
{
"epoch": 0.27,
"grad_norm": 0.06020184978842735,
"learning_rate": 0.00019634749247142313,
"loss": 0.6776,
"step": 260
},
{
"epoch": 0.27,
"grad_norm": 0.06419654935598373,
"learning_rate": 0.00019631839495577066,
"loss": 0.6984,
"step": 261
},
{
"epoch": 0.27,
"grad_norm": 0.057507649064064026,
"learning_rate": 0.00019628918417088784,
"loss": 0.6914,
"step": 262
},
{
"epoch": 0.27,
"grad_norm": 0.05764051154255867,
"learning_rate": 0.00019625986015112628,
"loss": 0.7786,
"step": 263
},
{
"epoch": 0.27,
"grad_norm": 0.056871023029088974,
"learning_rate": 0.0001962304229309706,
"loss": 0.7259,
"step": 264
},
{
"epoch": 0.27,
"grad_norm": 0.05893797427415848,
"learning_rate": 0.00019620087254503866,
"loss": 0.7435,
"step": 265
},
{
"epoch": 0.27,
"grad_norm": 0.0583423487842083,
"learning_rate": 0.00019617120902808125,
"loss": 0.7597,
"step": 266
},
{
"epoch": 0.28,
"grad_norm": 0.058257438242435455,
"learning_rate": 0.00019614143241498242,
"loss": 0.6764,
"step": 267
},
{
"epoch": 0.28,
"grad_norm": 0.06315512955188751,
"learning_rate": 0.00019611154274075903,
"loss": 0.6298,
"step": 268
},
{
"epoch": 0.28,
"grad_norm": 0.06553982943296432,
"learning_rate": 0.00019608154004056093,
"loss": 0.6789,
"step": 269
},
{
"epoch": 0.28,
"grad_norm": 0.05806327983736992,
"learning_rate": 0.000196051424349671,
"loss": 0.7067,
"step": 270
},
{
"epoch": 0.28,
"grad_norm": 0.057144396007061005,
"learning_rate": 0.00019602119570350483,
"loss": 0.6802,
"step": 271
},
{
"epoch": 0.28,
"grad_norm": 0.05949268490076065,
"learning_rate": 0.000195990854137611,
"loss": 0.7046,
"step": 272
},
{
"epoch": 0.28,
"grad_norm": 0.05610249564051628,
"learning_rate": 0.0001959603996876708,
"loss": 0.7682,
"step": 273
},
{
"epoch": 0.28,
"grad_norm": 0.056379836052656174,
"learning_rate": 0.00019592983238949824,
"loss": 0.714,
"step": 274
},
{
"epoch": 0.28,
"grad_norm": 0.05497708171606064,
"learning_rate": 0.00019589915227904016,
"loss": 0.6559,
"step": 275
},
{
"epoch": 0.28,
"grad_norm": 0.06712428480386734,
"learning_rate": 0.0001958683593923759,
"loss": 0.772,
"step": 276
},
{
"epoch": 0.29,
"grad_norm": 0.054381344467401505,
"learning_rate": 0.00019583745376571755,
"loss": 0.7686,
"step": 277
},
{
"epoch": 0.29,
"grad_norm": 0.059507984668016434,
"learning_rate": 0.00019580643543540976,
"loss": 0.7504,
"step": 278
},
{
"epoch": 0.29,
"grad_norm": 0.06055927649140358,
"learning_rate": 0.00019577530443792967,
"loss": 0.7746,
"step": 279
},
{
"epoch": 0.29,
"grad_norm": 0.05875448137521744,
"learning_rate": 0.00019574406080988697,
"loss": 0.7587,
"step": 280
},
{
"epoch": 0.29,
"grad_norm": 0.05514727905392647,
"learning_rate": 0.00019571270458802373,
"loss": 0.7401,
"step": 281
},
{
"epoch": 0.29,
"grad_norm": 0.05958930030465126,
"learning_rate": 0.00019568123580921453,
"loss": 0.7265,
"step": 282
},
{
"epoch": 0.29,
"grad_norm": 0.05936933308839798,
"learning_rate": 0.00019564965451046625,
"loss": 0.7312,
"step": 283
},
{
"epoch": 0.29,
"grad_norm": 0.06500967592000961,
"learning_rate": 0.00019561796072891806,
"loss": 0.744,
"step": 284
},
{
"epoch": 0.29,
"grad_norm": 0.05779334157705307,
"learning_rate": 0.0001955861545018415,
"loss": 0.7134,
"step": 285
},
{
"epoch": 0.29,
"grad_norm": 0.05654512718319893,
"learning_rate": 0.00019555423586664028,
"loss": 0.7576,
"step": 286
},
{
"epoch": 0.3,
"grad_norm": 0.08160107582807541,
"learning_rate": 0.00019552220486085032,
"loss": 0.7471,
"step": 287
},
{
"epoch": 0.3,
"grad_norm": 0.05500080808997154,
"learning_rate": 0.0001954900615221397,
"loss": 0.7135,
"step": 288
},
{
"epoch": 0.3,
"grad_norm": 0.05892986059188843,
"learning_rate": 0.00019545780588830854,
"loss": 0.7841,
"step": 289
},
{
"epoch": 0.3,
"grad_norm": 0.05923021584749222,
"learning_rate": 0.00019542543799728907,
"loss": 0.7111,
"step": 290
},
{
"epoch": 0.3,
"grad_norm": 0.059189844876527786,
"learning_rate": 0.00019539295788714554,
"loss": 0.7892,
"step": 291
},
{
"epoch": 0.3,
"grad_norm": 0.0577772781252861,
"learning_rate": 0.00019536036559607417,
"loss": 0.773,
"step": 292
},
{
"epoch": 0.3,
"grad_norm": 0.06281998008489609,
"learning_rate": 0.0001953276611624031,
"loss": 0.791,
"step": 293
},
{
"epoch": 0.3,
"grad_norm": 0.061750974506139755,
"learning_rate": 0.00019529484462459226,
"loss": 0.7482,
"step": 294
},
{
"epoch": 0.3,
"grad_norm": 0.06235501915216446,
"learning_rate": 0.00019526191602123357,
"loss": 0.7399,
"step": 295
},
{
"epoch": 0.31,
"grad_norm": 0.057336192578077316,
"learning_rate": 0.00019522887539105064,
"loss": 0.7354,
"step": 296
},
{
"epoch": 0.31,
"grad_norm": 0.05565524473786354,
"learning_rate": 0.0001951957227728988,
"loss": 0.6847,
"step": 297
},
{
"epoch": 0.31,
"grad_norm": 0.06352953612804413,
"learning_rate": 0.00019516245820576513,
"loss": 0.7753,
"step": 298
},
{
"epoch": 0.31,
"grad_norm": 0.054871831089258194,
"learning_rate": 0.0001951290817287684,
"loss": 0.7619,
"step": 299
},
{
"epoch": 0.31,
"grad_norm": 0.05734177678823471,
"learning_rate": 0.00019509559338115883,
"loss": 0.7775,
"step": 300
},
{
"epoch": 0.31,
"grad_norm": 0.05894226208329201,
"learning_rate": 0.00019506199320231838,
"loss": 0.7182,
"step": 301
},
{
"epoch": 0.31,
"grad_norm": 0.05706853047013283,
"learning_rate": 0.00019502828123176043,
"loss": 0.795,
"step": 302
},
{
"epoch": 0.31,
"grad_norm": 0.06002698838710785,
"learning_rate": 0.00019499445750912982,
"loss": 0.8106,
"step": 303
},
{
"epoch": 0.31,
"grad_norm": 0.05632604658603668,
"learning_rate": 0.0001949605220742028,
"loss": 0.7415,
"step": 304
},
{
"epoch": 0.31,
"grad_norm": 0.05873245373368263,
"learning_rate": 0.00019492647496688705,
"loss": 0.7293,
"step": 305
},
{
"epoch": 0.32,
"grad_norm": 0.060128260403871536,
"learning_rate": 0.00019489231622722155,
"loss": 0.7364,
"step": 306
},
{
"epoch": 0.32,
"grad_norm": 0.05893580988049507,
"learning_rate": 0.00019485804589537654,
"loss": 0.7152,
"step": 307
},
{
"epoch": 0.32,
"grad_norm": 0.07257378846406937,
"learning_rate": 0.0001948236640116535,
"loss": 0.7262,
"step": 308
},
{
"epoch": 0.32,
"grad_norm": 0.057697609066963196,
"learning_rate": 0.0001947891706164851,
"loss": 0.6915,
"step": 309
},
{
"epoch": 0.32,
"grad_norm": 0.05808861181139946,
"learning_rate": 0.00019475456575043513,
"loss": 0.6977,
"step": 310
},
{
"epoch": 0.32,
"grad_norm": 0.060822539031505585,
"learning_rate": 0.0001947198494541985,
"loss": 0.7963,
"step": 311
},
{
"epoch": 0.32,
"grad_norm": 0.05885370075702667,
"learning_rate": 0.00019468502176860116,
"loss": 0.782,
"step": 312
},
{
"epoch": 0.32,
"grad_norm": 0.05855882540345192,
"learning_rate": 0.00019465008273459997,
"loss": 0.7248,
"step": 313
},
{
"epoch": 0.32,
"grad_norm": 0.06043313443660736,
"learning_rate": 0.00019461503239328283,
"loss": 0.7003,
"step": 314
},
{
"epoch": 0.32,
"grad_norm": 0.0554848350584507,
"learning_rate": 0.00019457987078586852,
"loss": 0.6923,
"step": 315
},
{
"epoch": 0.33,
"grad_norm": 0.05844718590378761,
"learning_rate": 0.00019454459795370665,
"loss": 0.7486,
"step": 316
},
{
"epoch": 0.33,
"grad_norm": 0.059286076575517654,
"learning_rate": 0.0001945092139382776,
"loss": 0.7273,
"step": 317
},
{
"epoch": 0.33,
"grad_norm": 0.07302168011665344,
"learning_rate": 0.00019447371878119257,
"loss": 0.7323,
"step": 318
},
{
"epoch": 0.33,
"grad_norm": 0.0687604621052742,
"learning_rate": 0.00019443811252419337,
"loss": 0.7803,
"step": 319
},
{
"epoch": 0.33,
"grad_norm": 0.057997170835733414,
"learning_rate": 0.00019440239520915258,
"loss": 0.7097,
"step": 320
},
{
"epoch": 0.33,
"grad_norm": 0.05903468281030655,
"learning_rate": 0.00019436656687807328,
"loss": 0.7934,
"step": 321
},
{
"epoch": 0.33,
"grad_norm": 0.06016958877444267,
"learning_rate": 0.00019433062757308914,
"loss": 0.6503,
"step": 322
},
{
"epoch": 0.33,
"grad_norm": 0.06088041886687279,
"learning_rate": 0.00019429457733646433,
"loss": 0.7536,
"step": 323
},
{
"epoch": 0.33,
"grad_norm": 0.057344887405633926,
"learning_rate": 0.00019425841621059347,
"loss": 0.6832,
"step": 324
},
{
"epoch": 0.34,
"grad_norm": 0.061634745448827744,
"learning_rate": 0.0001942221442380016,
"loss": 0.6935,
"step": 325
},
{
"epoch": 0.34,
"grad_norm": 0.06309819221496582,
"learning_rate": 0.00019418576146134419,
"loss": 0.7277,
"step": 326
},
{
"epoch": 0.34,
"grad_norm": 0.05775715038180351,
"learning_rate": 0.0001941492679234068,
"loss": 0.6798,
"step": 327
},
{
"epoch": 0.34,
"grad_norm": 0.05710741505026817,
"learning_rate": 0.00019411266366710543,
"loss": 0.6822,
"step": 328
},
{
"epoch": 0.34,
"grad_norm": 0.05784869194030762,
"learning_rate": 0.0001940759487354862,
"loss": 0.6763,
"step": 329
},
{
"epoch": 0.34,
"grad_norm": 0.05700847879052162,
"learning_rate": 0.0001940391231717255,
"loss": 0.6696,
"step": 330
},
{
"epoch": 0.34,
"grad_norm": 0.06073322892189026,
"learning_rate": 0.00019400218701912965,
"loss": 0.7032,
"step": 331
},
{
"epoch": 0.34,
"grad_norm": 0.06225202605128288,
"learning_rate": 0.00019396514032113513,
"loss": 0.7258,
"step": 332
},
{
"epoch": 0.34,
"grad_norm": 0.06155748292803764,
"learning_rate": 0.00019392798312130836,
"loss": 0.7372,
"step": 333
},
{
"epoch": 0.34,
"grad_norm": 0.06383652240037918,
"learning_rate": 0.00019389071546334585,
"loss": 0.7572,
"step": 334
},
{
"epoch": 0.35,
"grad_norm": 0.057412583380937576,
"learning_rate": 0.00019385333739107375,
"loss": 0.6642,
"step": 335
},
{
"epoch": 0.35,
"grad_norm": 0.061736416071653366,
"learning_rate": 0.00019381584894844834,
"loss": 0.7309,
"step": 336
},
{
"epoch": 0.35,
"grad_norm": 0.060744259506464005,
"learning_rate": 0.00019377825017955549,
"loss": 0.7161,
"step": 337
},
{
"epoch": 0.35,
"grad_norm": 0.05964282155036926,
"learning_rate": 0.00019374054112861089,
"loss": 0.7405,
"step": 338
},
{
"epoch": 0.35,
"grad_norm": 0.06293050944805145,
"learning_rate": 0.00019370272183995993,
"loss": 0.645,
"step": 339
},
{
"epoch": 0.35,
"grad_norm": 0.05713023245334625,
"learning_rate": 0.0001936647923580776,
"loss": 0.7232,
"step": 340
},
{
"epoch": 0.35,
"grad_norm": 0.05848432704806328,
"learning_rate": 0.0001936267527275685,
"loss": 0.735,
"step": 341
},
{
"epoch": 0.35,
"grad_norm": 0.05630767345428467,
"learning_rate": 0.00019358860299316678,
"loss": 0.7797,
"step": 342
},
{
"epoch": 0.35,
"grad_norm": 0.06115993112325668,
"learning_rate": 0.00019355034319973603,
"loss": 0.6872,
"step": 343
},
{
"epoch": 0.35,
"grad_norm": 0.06437361240386963,
"learning_rate": 0.00019351197339226935,
"loss": 0.7728,
"step": 344
},
{
"epoch": 0.36,
"grad_norm": 0.06171271950006485,
"learning_rate": 0.00019347349361588907,
"loss": 0.7873,
"step": 345
},
{
"epoch": 0.36,
"grad_norm": 0.06096939742565155,
"learning_rate": 0.00019343490391584696,
"loss": 0.702,
"step": 346
},
{
"epoch": 0.36,
"grad_norm": 0.05415042117238045,
"learning_rate": 0.000193396204337524,
"loss": 0.6321,
"step": 347
},
{
"epoch": 0.36,
"grad_norm": 0.05933550372719765,
"learning_rate": 0.0001933573949264305,
"loss": 0.7682,
"step": 348
},
{
"epoch": 0.36,
"grad_norm": 0.05992317944765091,
"learning_rate": 0.00019331847572820574,
"loss": 0.7141,
"step": 349
},
{
"epoch": 0.36,
"grad_norm": 0.061700768768787384,
"learning_rate": 0.00019327944678861826,
"loss": 0.787,
"step": 350
},
{
"epoch": 0.36,
"grad_norm": 0.06429409980773926,
"learning_rate": 0.0001932403081535656,
"loss": 0.8566,
"step": 351
},
{
"epoch": 0.36,
"grad_norm": 0.060838643461465836,
"learning_rate": 0.00019320105986907432,
"loss": 0.7432,
"step": 352
},
{
"epoch": 0.36,
"grad_norm": 0.06311419606208801,
"learning_rate": 0.0001931617019812999,
"loss": 0.7504,
"step": 353
},
{
"epoch": 0.37,
"grad_norm": 0.05914825573563576,
"learning_rate": 0.00019312223453652673,
"loss": 0.7791,
"step": 354
},
{
"epoch": 0.37,
"grad_norm": 0.06227264553308487,
"learning_rate": 0.000193082657581168,
"loss": 0.7584,
"step": 355
},
{
"epoch": 0.37,
"grad_norm": 0.05977202579379082,
"learning_rate": 0.00019304297116176574,
"loss": 0.6649,
"step": 356
},
{
"epoch": 0.37,
"grad_norm": 0.059180788695812225,
"learning_rate": 0.00019300317532499069,
"loss": 0.6559,
"step": 357
},
{
"epoch": 0.37,
"grad_norm": 0.06028233468532562,
"learning_rate": 0.00019296327011764225,
"loss": 0.7214,
"step": 358
},
{
"epoch": 0.37,
"grad_norm": 0.06343667954206467,
"learning_rate": 0.00019292325558664843,
"loss": 0.6716,
"step": 359
},
{
"epoch": 0.37,
"grad_norm": 0.059275731444358826,
"learning_rate": 0.00019288313177906588,
"loss": 0.7321,
"step": 360
},
{
"epoch": 0.37,
"grad_norm": 0.06691042333841324,
"learning_rate": 0.00019284289874207963,
"loss": 0.7095,
"step": 361
},
{
"epoch": 0.37,
"grad_norm": 0.06491021066904068,
"learning_rate": 0.00019280255652300325,
"loss": 0.6874,
"step": 362
},
{
"epoch": 0.37,
"grad_norm": 0.06535612046718597,
"learning_rate": 0.00019276210516927872,
"loss": 0.7175,
"step": 363
},
{
"epoch": 0.38,
"grad_norm": 0.06091107428073883,
"learning_rate": 0.00019272154472847625,
"loss": 0.6964,
"step": 364
},
{
"epoch": 0.38,
"grad_norm": 0.06472720205783844,
"learning_rate": 0.00019268087524829447,
"loss": 0.7771,
"step": 365
},
{
"epoch": 0.38,
"grad_norm": 0.06106911227107048,
"learning_rate": 0.0001926400967765602,
"loss": 0.7023,
"step": 366
},
{
"epoch": 0.38,
"grad_norm": 0.06196704879403114,
"learning_rate": 0.00019259920936122836,
"loss": 0.7171,
"step": 367
},
{
"epoch": 0.38,
"grad_norm": 0.06340812891721725,
"learning_rate": 0.00019255821305038206,
"loss": 0.6776,
"step": 368
},
{
"epoch": 0.38,
"grad_norm": 0.05937547609210014,
"learning_rate": 0.0001925171078922325,
"loss": 0.6742,
"step": 369
},
{
"epoch": 0.38,
"grad_norm": 0.05785318836569786,
"learning_rate": 0.00019247589393511872,
"loss": 0.6348,
"step": 370
},
{
"epoch": 0.38,
"grad_norm": 0.060800664126873016,
"learning_rate": 0.00019243457122750793,
"loss": 0.7789,
"step": 371
},
{
"epoch": 0.38,
"grad_norm": 0.06566699594259262,
"learning_rate": 0.00019239313981799506,
"loss": 0.7235,
"step": 372
},
{
"epoch": 0.38,
"grad_norm": 0.06358596682548523,
"learning_rate": 0.00019235159975530293,
"loss": 0.6262,
"step": 373
},
{
"epoch": 0.39,
"grad_norm": 0.06445778906345367,
"learning_rate": 0.00019230995108828215,
"loss": 0.6495,
"step": 374
},
{
"epoch": 0.39,
"grad_norm": 0.059928618371486664,
"learning_rate": 0.00019226819386591106,
"loss": 0.6459,
"step": 375
},
{
"epoch": 0.39,
"grad_norm": 0.0632234737277031,
"learning_rate": 0.0001922263281372956,
"loss": 0.6777,
"step": 376
},
{
"epoch": 0.39,
"grad_norm": 0.06647662818431854,
"learning_rate": 0.00019218435395166933,
"loss": 0.6579,
"step": 377
},
{
"epoch": 0.39,
"grad_norm": 0.07134454697370529,
"learning_rate": 0.0001921422713583934,
"loss": 0.7326,
"step": 378
},
{
"epoch": 0.39,
"grad_norm": 0.056324154138565063,
"learning_rate": 0.00019210008040695638,
"loss": 0.6974,
"step": 379
},
{
"epoch": 0.39,
"grad_norm": 0.06590214371681213,
"learning_rate": 0.00019205778114697437,
"loss": 0.6706,
"step": 380
},
{
"epoch": 0.39,
"grad_norm": 0.05920105054974556,
"learning_rate": 0.0001920153736281907,
"loss": 0.8247,
"step": 381
},
{
"epoch": 0.39,
"grad_norm": 0.06643356382846832,
"learning_rate": 0.00019197285790047608,
"loss": 0.7184,
"step": 382
},
{
"epoch": 0.39,
"grad_norm": 0.07437850534915924,
"learning_rate": 0.0001919302340138285,
"loss": 0.8304,
"step": 383
},
{
"epoch": 0.4,
"grad_norm": 0.059930771589279175,
"learning_rate": 0.0001918875020183732,
"loss": 0.6705,
"step": 384
},
{
"epoch": 0.4,
"grad_norm": 0.05600350350141525,
"learning_rate": 0.00019184466196436236,
"loss": 0.6985,
"step": 385
},
{
"epoch": 0.4,
"grad_norm": 0.05550659820437431,
"learning_rate": 0.00019180171390217537,
"loss": 0.7603,
"step": 386
},
{
"epoch": 0.4,
"grad_norm": 0.0702609270811081,
"learning_rate": 0.00019175865788231863,
"loss": 0.6904,
"step": 387
},
{
"epoch": 0.4,
"grad_norm": 0.07218141108751297,
"learning_rate": 0.00019171549395542546,
"loss": 0.7817,
"step": 388
},
{
"epoch": 0.4,
"grad_norm": 0.06199540197849274,
"learning_rate": 0.00019167222217225617,
"loss": 0.7508,
"step": 389
},
{
"epoch": 0.4,
"grad_norm": 0.06544966995716095,
"learning_rate": 0.00019162884258369775,
"loss": 0.7264,
"step": 390
},
{
"epoch": 0.4,
"grad_norm": 0.06334829330444336,
"learning_rate": 0.0001915853552407641,
"loss": 0.7282,
"step": 391
},
{
"epoch": 0.4,
"grad_norm": 0.06337782740592957,
"learning_rate": 0.0001915417601945958,
"loss": 0.7331,
"step": 392
},
{
"epoch": 0.41,
"grad_norm": 0.06350013613700867,
"learning_rate": 0.00019149805749646006,
"loss": 0.6549,
"step": 393
},
{
"epoch": 0.41,
"grad_norm": 0.05926767736673355,
"learning_rate": 0.0001914542471977507,
"loss": 0.7421,
"step": 394
},
{
"epoch": 0.41,
"grad_norm": 0.06383328139781952,
"learning_rate": 0.00019141032934998813,
"loss": 0.7322,
"step": 395
},
{
"epoch": 0.41,
"grad_norm": 0.07084285467863083,
"learning_rate": 0.00019136630400481912,
"loss": 0.7155,
"step": 396
},
{
"epoch": 0.41,
"grad_norm": 0.06484265625476837,
"learning_rate": 0.00019132217121401696,
"loss": 0.6974,
"step": 397
},
{
"epoch": 0.41,
"grad_norm": 0.06470082700252533,
"learning_rate": 0.0001912779310294813,
"loss": 0.8072,
"step": 398
},
{
"epoch": 0.41,
"grad_norm": 0.0596977099776268,
"learning_rate": 0.000191233583503238,
"loss": 0.6591,
"step": 399
},
{
"epoch": 0.41,
"grad_norm": 0.057743631303310394,
"learning_rate": 0.0001911891286874392,
"loss": 0.7119,
"step": 400
},
{
"epoch": 0.41,
"grad_norm": 0.06098794564604759,
"learning_rate": 0.00019114456663436324,
"loss": 0.6982,
"step": 401
},
{
"epoch": 0.41,
"grad_norm": 0.05964438244700432,
"learning_rate": 0.00019109989739641447,
"loss": 0.6741,
"step": 402
},
{
"epoch": 0.42,
"grad_norm": 0.06789212673902512,
"learning_rate": 0.00019105512102612343,
"loss": 0.7152,
"step": 403
},
{
"epoch": 0.42,
"grad_norm": 0.06945686042308807,
"learning_rate": 0.00019101023757614656,
"loss": 0.7184,
"step": 404
},
{
"epoch": 0.42,
"grad_norm": 0.06108655408024788,
"learning_rate": 0.0001909652470992662,
"loss": 0.7476,
"step": 405
},
{
"epoch": 0.42,
"grad_norm": 0.06126492843031883,
"learning_rate": 0.0001909201496483906,
"loss": 0.7669,
"step": 406
},
{
"epoch": 0.42,
"grad_norm": 0.06494157016277313,
"learning_rate": 0.00019087494527655384,
"loss": 0.7191,
"step": 407
},
{
"epoch": 0.42,
"grad_norm": 0.06480975449085236,
"learning_rate": 0.00019082963403691563,
"loss": 0.7215,
"step": 408
},
{
"epoch": 0.42,
"grad_norm": 0.06445412337779999,
"learning_rate": 0.00019078421598276148,
"loss": 0.7358,
"step": 409
},
{
"epoch": 0.42,
"grad_norm": 0.06461311876773834,
"learning_rate": 0.00019073869116750244,
"loss": 0.7462,
"step": 410
},
{
"epoch": 0.42,
"grad_norm": 0.06038064882159233,
"learning_rate": 0.00019069305964467508,
"loss": 0.6975,
"step": 411
},
{
"epoch": 0.42,
"grad_norm": 0.058075543493032455,
"learning_rate": 0.00019064732146794157,
"loss": 0.7348,
"step": 412
},
{
"epoch": 0.43,
"grad_norm": 0.06547833234071732,
"learning_rate": 0.00019060147669108942,
"loss": 0.6846,
"step": 413
},
{
"epoch": 0.43,
"grad_norm": 0.05910010635852814,
"learning_rate": 0.00019055552536803146,
"loss": 0.6978,
"step": 414
},
{
"epoch": 0.43,
"grad_norm": 0.061262935400009155,
"learning_rate": 0.00019050946755280594,
"loss": 0.7925,
"step": 415
},
{
"epoch": 0.43,
"grad_norm": 0.06281925737857819,
"learning_rate": 0.00019046330329957622,
"loss": 0.7537,
"step": 416
},
{
"epoch": 0.43,
"grad_norm": 0.06479451060295105,
"learning_rate": 0.00019041703266263094,
"loss": 0.7462,
"step": 417
},
{
"epoch": 0.43,
"grad_norm": 0.06690587103366852,
"learning_rate": 0.00019037065569638375,
"loss": 0.7118,
"step": 418
},
{
"epoch": 0.43,
"grad_norm": 0.06481601297855377,
"learning_rate": 0.00019032417245537343,
"loss": 0.6746,
"step": 419
},
{
"epoch": 0.43,
"grad_norm": 0.06519322842359543,
"learning_rate": 0.0001902775829942636,
"loss": 0.7062,
"step": 420
},
{
"epoch": 0.43,
"grad_norm": 0.058651816099882126,
"learning_rate": 0.000190230887367843,
"loss": 0.7479,
"step": 421
},
{
"epoch": 0.44,
"grad_norm": 0.06371847540140152,
"learning_rate": 0.00019018408563102504,
"loss": 0.7954,
"step": 422
},
{
"epoch": 0.44,
"grad_norm": 0.061327967792749405,
"learning_rate": 0.00019013717783884798,
"loss": 0.6943,
"step": 423
},
{
"epoch": 0.44,
"grad_norm": 0.06613918393850327,
"learning_rate": 0.00019009016404647477,
"loss": 0.7395,
"step": 424
},
{
"epoch": 0.44,
"grad_norm": 0.06330893188714981,
"learning_rate": 0.00019004304430919312,
"loss": 0.6978,
"step": 425
},
{
"epoch": 0.44,
"grad_norm": 0.061978746205568314,
"learning_rate": 0.0001899958186824152,
"loss": 0.6608,
"step": 426
},
{
"epoch": 0.44,
"grad_norm": 0.06049848347902298,
"learning_rate": 0.00018994848722167778,
"loss": 0.8073,
"step": 427
},
{
"epoch": 0.44,
"grad_norm": 0.061442065984010696,
"learning_rate": 0.00018990104998264205,
"loss": 0.6818,
"step": 428
},
{
"epoch": 0.44,
"grad_norm": 0.0643540471792221,
"learning_rate": 0.0001898535070210936,
"loss": 0.7317,
"step": 429
},
{
"epoch": 0.44,
"grad_norm": 0.06444810330867767,
"learning_rate": 0.00018980585839294237,
"loss": 0.6947,
"step": 430
},
{
"epoch": 0.44,
"grad_norm": 0.063243567943573,
"learning_rate": 0.00018975810415422257,
"loss": 0.8226,
"step": 431
},
{
"epoch": 0.45,
"grad_norm": 0.062445830553770065,
"learning_rate": 0.00018971024436109258,
"loss": 0.6881,
"step": 432
},
{
"epoch": 0.45,
"grad_norm": 0.06391721218824387,
"learning_rate": 0.00018966227906983492,
"loss": 0.7531,
"step": 433
},
{
"epoch": 0.45,
"grad_norm": 0.07556512951850891,
"learning_rate": 0.00018961420833685618,
"loss": 0.6901,
"step": 434
},
{
"epoch": 0.45,
"grad_norm": 0.07024349272251129,
"learning_rate": 0.0001895660322186869,
"loss": 0.6927,
"step": 435
},
{
"epoch": 0.45,
"grad_norm": 0.062289077788591385,
"learning_rate": 0.00018951775077198163,
"loss": 0.6386,
"step": 436
},
{
"epoch": 0.45,
"grad_norm": 0.0685746967792511,
"learning_rate": 0.00018946936405351875,
"loss": 0.7119,
"step": 437
},
{
"epoch": 0.45,
"grad_norm": 0.06907238811254501,
"learning_rate": 0.00018942087212020045,
"loss": 0.767,
"step": 438
},
{
"epoch": 0.45,
"grad_norm": 0.05862850695848465,
"learning_rate": 0.0001893722750290526,
"loss": 0.7121,
"step": 439
},
{
"epoch": 0.45,
"grad_norm": 0.06181567534804344,
"learning_rate": 0.00018932357283722482,
"loss": 0.709,
"step": 440
},
{
"epoch": 0.45,
"grad_norm": 0.06529933214187622,
"learning_rate": 0.00018927476560199022,
"loss": 0.7013,
"step": 441
},
{
"epoch": 0.46,
"grad_norm": 0.06098626181483269,
"learning_rate": 0.00018922585338074555,
"loss": 0.6641,
"step": 442
},
{
"epoch": 0.46,
"grad_norm": 0.06008550152182579,
"learning_rate": 0.00018917683623101094,
"loss": 0.7429,
"step": 443
},
{
"epoch": 0.46,
"grad_norm": 0.06048089265823364,
"learning_rate": 0.00018912771421042995,
"loss": 0.7312,
"step": 444
},
{
"epoch": 0.46,
"grad_norm": 0.06655411422252655,
"learning_rate": 0.0001890784873767695,
"loss": 0.7395,
"step": 445
},
{
"epoch": 0.46,
"grad_norm": 0.07008501142263412,
"learning_rate": 0.00018902915578791968,
"loss": 0.7715,
"step": 446
},
{
"epoch": 0.46,
"grad_norm": 0.07080001384019852,
"learning_rate": 0.00018897971950189386,
"loss": 0.7745,
"step": 447
},
{
"epoch": 0.46,
"grad_norm": 0.06971325725317001,
"learning_rate": 0.00018893017857682848,
"loss": 0.7391,
"step": 448
},
{
"epoch": 0.46,
"grad_norm": 0.06647112965583801,
"learning_rate": 0.00018888053307098305,
"loss": 0.6923,
"step": 449
},
{
"epoch": 0.46,
"grad_norm": 0.06085026636719704,
"learning_rate": 0.0001888307830427401,
"loss": 0.7561,
"step": 450
},
{
"epoch": 0.47,
"grad_norm": 0.06949692964553833,
"learning_rate": 0.00018878092855060497,
"loss": 0.7839,
"step": 451
},
{
"epoch": 0.47,
"grad_norm": 0.0794142335653305,
"learning_rate": 0.00018873096965320595,
"loss": 0.6829,
"step": 452
},
{
"epoch": 0.47,
"grad_norm": 0.07217901945114136,
"learning_rate": 0.00018868090640929413,
"loss": 0.7016,
"step": 453
},
{
"epoch": 0.47,
"grad_norm": 0.06830142438411713,
"learning_rate": 0.00018863073887774318,
"loss": 0.7442,
"step": 454
},
{
"epoch": 0.47,
"grad_norm": 0.0632953941822052,
"learning_rate": 0.00018858046711754954,
"loss": 0.7711,
"step": 455
},
{
"epoch": 0.47,
"grad_norm": 0.0693698301911354,
"learning_rate": 0.00018853009118783217,
"loss": 0.8282,
"step": 456
},
{
"epoch": 0.47,
"grad_norm": 0.061216652393341064,
"learning_rate": 0.00018847961114783256,
"loss": 0.7011,
"step": 457
},
{
"epoch": 0.47,
"grad_norm": 0.06573464721441269,
"learning_rate": 0.00018842902705691454,
"loss": 0.7428,
"step": 458
},
{
"epoch": 0.47,
"grad_norm": 0.06309016048908234,
"learning_rate": 0.0001883783389745644,
"loss": 0.7338,
"step": 459
},
{
"epoch": 0.47,
"grad_norm": 0.06523136794567108,
"learning_rate": 0.00018832754696039073,
"loss": 0.7215,
"step": 460
},
{
"epoch": 0.48,
"grad_norm": 0.06510775536298752,
"learning_rate": 0.00018827665107412424,
"loss": 0.7303,
"step": 461
},
{
"epoch": 0.48,
"grad_norm": 0.06285244226455688,
"learning_rate": 0.0001882256513756179,
"loss": 0.8391,
"step": 462
},
{
"epoch": 0.48,
"grad_norm": 0.06129281967878342,
"learning_rate": 0.0001881745479248467,
"loss": 0.651,
"step": 463
},
{
"epoch": 0.48,
"grad_norm": 0.06267630308866501,
"learning_rate": 0.00018812334078190769,
"loss": 0.6906,
"step": 464
},
{
"epoch": 0.48,
"grad_norm": 0.06995107978582382,
"learning_rate": 0.00018807203000701978,
"loss": 0.7636,
"step": 465
},
{
"epoch": 0.48,
"grad_norm": 0.06257961690425873,
"learning_rate": 0.00018802061566052384,
"loss": 0.7336,
"step": 466
},
{
"epoch": 0.48,
"grad_norm": 0.061007607728242874,
"learning_rate": 0.00018796909780288248,
"loss": 0.6634,
"step": 467
},
{
"epoch": 0.48,
"grad_norm": 0.06716372072696686,
"learning_rate": 0.0001879174764946801,
"loss": 0.7982,
"step": 468
},
{
"epoch": 0.48,
"grad_norm": 0.06520219892263412,
"learning_rate": 0.00018786575179662265,
"loss": 0.7175,
"step": 469
},
{
"epoch": 0.48,
"grad_norm": 0.06266435235738754,
"learning_rate": 0.00018781392376953778,
"loss": 0.8244,
"step": 470
},
{
"epoch": 0.49,
"grad_norm": 0.06289061903953552,
"learning_rate": 0.00018776199247437457,
"loss": 0.7139,
"step": 471
},
{
"epoch": 0.49,
"grad_norm": 0.06526096910238266,
"learning_rate": 0.00018770995797220356,
"loss": 0.7111,
"step": 472
},
{
"epoch": 0.49,
"grad_norm": 0.06748399138450623,
"learning_rate": 0.00018765782032421674,
"loss": 0.7057,
"step": 473
},
{
"epoch": 0.49,
"grad_norm": 0.06067729741334915,
"learning_rate": 0.0001876055795917273,
"loss": 0.616,
"step": 474
},
{
"epoch": 0.49,
"grad_norm": 0.07784713804721832,
"learning_rate": 0.00018755323583616966,
"loss": 0.7285,
"step": 475
},
{
"epoch": 0.49,
"grad_norm": 0.06458035111427307,
"learning_rate": 0.00018750078911909947,
"loss": 0.7296,
"step": 476
},
{
"epoch": 0.49,
"grad_norm": 0.06379619985818863,
"learning_rate": 0.00018744823950219339,
"loss": 0.6915,
"step": 477
},
{
"epoch": 0.49,
"grad_norm": 0.06064402312040329,
"learning_rate": 0.00018739558704724911,
"loss": 0.7291,
"step": 478
},
{
"epoch": 0.49,
"grad_norm": 0.06831635534763336,
"learning_rate": 0.00018734283181618525,
"loss": 0.7506,
"step": 479
},
{
"epoch": 0.49,
"grad_norm": 0.06278979033231735,
"learning_rate": 0.00018728997387104136,
"loss": 0.7389,
"step": 480
},
{
"epoch": 0.5,
"grad_norm": 0.072098508477211,
"learning_rate": 0.00018723701327397766,
"loss": 0.7571,
"step": 481
},
{
"epoch": 0.5,
"grad_norm": 0.14378315210342407,
"learning_rate": 0.00018718395008727518,
"loss": 0.7914,
"step": 482
},
{
"epoch": 0.5,
"grad_norm": 0.06290939450263977,
"learning_rate": 0.0001871307843733356,
"loss": 0.6587,
"step": 483
},
{
"epoch": 0.5,
"grad_norm": 0.06457720696926117,
"learning_rate": 0.00018707751619468103,
"loss": 0.6835,
"step": 484
},
{
"epoch": 0.5,
"grad_norm": 0.07224012911319733,
"learning_rate": 0.0001870241456139543,
"loss": 0.6609,
"step": 485
},
{
"epoch": 0.5,
"grad_norm": 0.06716971844434738,
"learning_rate": 0.00018697067269391848,
"loss": 0.7688,
"step": 486
},
{
"epoch": 0.5,
"eval_loss": 0.8164525628089905,
"eval_runtime": 111.6882,
"eval_samples_per_second": 88.98,
"eval_steps_per_second": 22.249,
"step": 486
},
{
"epoch": 0.5,
"grad_norm": 0.07206602394580841,
"learning_rate": 0.00018691709749745706,
"loss": 0.7399,
"step": 487
},
{
"epoch": 0.5,
"grad_norm": 0.06269916892051697,
"learning_rate": 0.00018686342008757383,
"loss": 0.6564,
"step": 488
},
{
"epoch": 0.5,
"grad_norm": 0.06308487802743912,
"learning_rate": 0.00018680964052739273,
"loss": 0.7436,
"step": 489
},
{
"epoch": 0.51,
"grad_norm": 0.06728991121053696,
"learning_rate": 0.0001867557588801579,
"loss": 0.7415,
"step": 490
},
{
"epoch": 0.51,
"grad_norm": 0.06449094414710999,
"learning_rate": 0.0001867017752092334,
"loss": 0.7578,
"step": 491
},
{
"epoch": 0.51,
"grad_norm": 0.08734392374753952,
"learning_rate": 0.0001866476895781034,
"loss": 0.7041,
"step": 492
},
{
"epoch": 0.51,
"grad_norm": 0.06480298936367035,
"learning_rate": 0.0001865935020503719,
"loss": 0.7842,
"step": 493
},
{
"epoch": 0.51,
"grad_norm": 0.09008181840181351,
"learning_rate": 0.00018653921268976285,
"loss": 0.6765,
"step": 494
},
{
"epoch": 0.51,
"grad_norm": 0.06579532474279404,
"learning_rate": 0.00018648482156011977,
"loss": 0.6687,
"step": 495
},
{
"epoch": 0.51,
"grad_norm": 0.06645182520151138,
"learning_rate": 0.000186430328725406,
"loss": 0.7584,
"step": 496
},
{
"epoch": 0.51,
"grad_norm": 0.06692402064800262,
"learning_rate": 0.00018637573424970435,
"loss": 0.7336,
"step": 497
},
{
"epoch": 0.51,
"grad_norm": 0.06754589080810547,
"learning_rate": 0.00018632103819721731,
"loss": 0.7692,
"step": 498
},
{
"epoch": 0.51,
"grad_norm": 0.06759774684906006,
"learning_rate": 0.00018626624063226675,
"loss": 0.792,
"step": 499
},
{
"epoch": 0.52,
"grad_norm": 0.06528620421886444,
"learning_rate": 0.00018621134161929393,
"loss": 0.76,
"step": 500
},
{
"epoch": 0.52,
"grad_norm": 0.07810235023498535,
"learning_rate": 0.00018615634122285936,
"loss": 0.7695,
"step": 501
},
{
"epoch": 0.52,
"grad_norm": 0.06633520126342773,
"learning_rate": 0.00018610123950764287,
"loss": 0.711,
"step": 502
},
{
"epoch": 0.52,
"grad_norm": 0.0723094642162323,
"learning_rate": 0.00018604603653844337,
"loss": 0.7693,
"step": 503
},
{
"epoch": 0.52,
"grad_norm": 0.06438109278678894,
"learning_rate": 0.00018599073238017888,
"loss": 0.6729,
"step": 504
},
{
"epoch": 0.52,
"grad_norm": 0.062286246567964554,
"learning_rate": 0.00018593532709788635,
"loss": 0.6951,
"step": 505
},
{
"epoch": 0.52,
"grad_norm": 0.06298733502626419,
"learning_rate": 0.00018587982075672174,
"loss": 0.6774,
"step": 506
},
{
"epoch": 0.52,
"grad_norm": 0.06751766055822372,
"learning_rate": 0.00018582421342195978,
"loss": 0.7711,
"step": 507
},
{
"epoch": 0.52,
"grad_norm": 0.06741178035736084,
"learning_rate": 0.0001857685051589941,
"loss": 0.706,
"step": 508
},
{
"epoch": 0.52,
"grad_norm": 0.09620961546897888,
"learning_rate": 0.00018571269603333677,
"loss": 0.6802,
"step": 509
},
{
"epoch": 0.53,
"grad_norm": 0.07265791296958923,
"learning_rate": 0.00018565678611061877,
"loss": 0.7392,
"step": 510
},
{
"epoch": 0.53,
"grad_norm": 0.06844795495271683,
"learning_rate": 0.0001856007754565894,
"loss": 0.7633,
"step": 511
},
{
"epoch": 0.53,
"grad_norm": 0.07247466593980789,
"learning_rate": 0.00018554466413711645,
"loss": 0.6176,
"step": 512
},
{
"epoch": 0.53,
"grad_norm": 0.06879562884569168,
"learning_rate": 0.0001854884522181862,
"loss": 0.7368,
"step": 513
},
{
"epoch": 0.53,
"grad_norm": 0.06788138300180435,
"learning_rate": 0.0001854321397659031,
"loss": 0.7507,
"step": 514
},
{
"epoch": 0.53,
"grad_norm": 0.06438209116458893,
"learning_rate": 0.00018537572684649,
"loss": 0.7451,
"step": 515
},
{
"epoch": 0.53,
"grad_norm": 0.06962509453296661,
"learning_rate": 0.00018531921352628772,
"loss": 0.747,
"step": 516
},
{
"epoch": 0.53,
"grad_norm": 0.07272413372993469,
"learning_rate": 0.00018526259987175522,
"loss": 0.6815,
"step": 517
},
{
"epoch": 0.53,
"grad_norm": 0.07266836613416672,
"learning_rate": 0.0001852058859494694,
"loss": 0.7337,
"step": 518
},
{
"epoch": 0.54,
"grad_norm": 0.06824107468128204,
"learning_rate": 0.00018514907182612525,
"loss": 0.7137,
"step": 519
},
{
"epoch": 0.54,
"grad_norm": 0.0668516680598259,
"learning_rate": 0.00018509215756853535,
"loss": 0.7391,
"step": 520
},
{
"epoch": 0.54,
"grad_norm": 0.06273160874843597,
"learning_rate": 0.00018503514324363023,
"loss": 0.75,
"step": 521
},
{
"epoch": 0.54,
"grad_norm": 0.06964839994907379,
"learning_rate": 0.000184978028918458,
"loss": 0.723,
"step": 522
},
{
"epoch": 0.54,
"grad_norm": 0.06386502832174301,
"learning_rate": 0.00018492081466018435,
"loss": 0.7156,
"step": 523
},
{
"epoch": 0.54,
"grad_norm": 0.06581712514162064,
"learning_rate": 0.00018486350053609262,
"loss": 0.7569,
"step": 524
},
{
"epoch": 0.54,
"grad_norm": 0.07477328926324844,
"learning_rate": 0.00018480608661358344,
"loss": 0.8663,
"step": 525
},
{
"epoch": 0.54,
"grad_norm": 0.07748843729496002,
"learning_rate": 0.0001847485729601749,
"loss": 0.5842,
"step": 526
},
{
"epoch": 0.54,
"grad_norm": 0.06755229830741882,
"learning_rate": 0.00018469095964350233,
"loss": 0.7617,
"step": 527
},
{
"epoch": 0.54,
"grad_norm": 0.0634811520576477,
"learning_rate": 0.00018463324673131823,
"loss": 0.7125,
"step": 528
},
{
"epoch": 0.55,
"grad_norm": 0.06521796435117722,
"learning_rate": 0.00018457543429149234,
"loss": 0.7251,
"step": 529
},
{
"epoch": 0.55,
"grad_norm": 0.07096633315086365,
"learning_rate": 0.0001845175223920113,
"loss": 0.6871,
"step": 530
},
{
"epoch": 0.55,
"grad_norm": 0.066802479326725,
"learning_rate": 0.00018445951110097883,
"loss": 0.7262,
"step": 531
},
{
"epoch": 0.55,
"grad_norm": 0.06430657207965851,
"learning_rate": 0.00018440140048661547,
"loss": 0.7712,
"step": 532
},
{
"epoch": 0.55,
"grad_norm": 0.06193665415048599,
"learning_rate": 0.00018434319061725858,
"loss": 0.6698,
"step": 533
},
{
"epoch": 0.55,
"grad_norm": 0.062316689640283585,
"learning_rate": 0.0001842848815613622,
"loss": 0.7413,
"step": 534
},
{
"epoch": 0.55,
"grad_norm": 0.06545069813728333,
"learning_rate": 0.00018422647338749707,
"loss": 0.6857,
"step": 535
},
{
"epoch": 0.55,
"grad_norm": 0.0639072060585022,
"learning_rate": 0.00018416796616435054,
"loss": 0.6938,
"step": 536
},
{
"epoch": 0.55,
"grad_norm": 0.06502430140972137,
"learning_rate": 0.0001841093599607263,
"loss": 0.7396,
"step": 537
},
{
"epoch": 0.55,
"grad_norm": 0.06591713428497314,
"learning_rate": 0.00018405065484554452,
"loss": 0.6659,
"step": 538
},
{
"epoch": 0.56,
"grad_norm": 0.063933365046978,
"learning_rate": 0.0001839918508878417,
"loss": 0.7334,
"step": 539
},
{
"epoch": 0.56,
"grad_norm": 0.06484158337116241,
"learning_rate": 0.00018393294815677056,
"loss": 0.7337,
"step": 540
},
{
"epoch": 0.56,
"grad_norm": 0.066524937748909,
"learning_rate": 0.0001838739467216,
"loss": 0.7174,
"step": 541
},
{
"epoch": 0.56,
"grad_norm": 0.0775236114859581,
"learning_rate": 0.0001838148466517149,
"loss": 0.7697,
"step": 542
},
{
"epoch": 0.56,
"grad_norm": 0.06475924700498581,
"learning_rate": 0.0001837556480166163,
"loss": 0.7568,
"step": 543
},
{
"epoch": 0.56,
"grad_norm": 0.06703387200832367,
"learning_rate": 0.000183696350885921,
"loss": 0.7124,
"step": 544
},
{
"epoch": 0.56,
"grad_norm": 0.0641862079501152,
"learning_rate": 0.00018363695532936164,
"loss": 0.707,
"step": 545
},
{
"epoch": 0.56,
"grad_norm": 0.06697754561901093,
"learning_rate": 0.00018357746141678676,
"loss": 0.7185,
"step": 546
},
{
"epoch": 0.56,
"grad_norm": 0.06705842912197113,
"learning_rate": 0.00018351786921816038,
"loss": 0.7649,
"step": 547
},
{
"epoch": 0.57,
"grad_norm": 0.06511738151311874,
"learning_rate": 0.00018345817880356218,
"loss": 0.722,
"step": 548
},
{
"epoch": 0.57,
"grad_norm": 0.06530781090259552,
"learning_rate": 0.0001833983902431874,
"loss": 0.7143,
"step": 549
},
{
"epoch": 0.57,
"grad_norm": 0.06371057033538818,
"learning_rate": 0.00018333850360734662,
"loss": 0.6969,
"step": 550
},
{
"epoch": 0.57,
"grad_norm": 0.07265166938304901,
"learning_rate": 0.00018327851896646574,
"loss": 0.7703,
"step": 551
},
{
"epoch": 0.57,
"grad_norm": 0.07043513655662537,
"learning_rate": 0.000183218436391086,
"loss": 0.7416,
"step": 552
},
{
"epoch": 0.57,
"grad_norm": 0.06820254027843475,
"learning_rate": 0.0001831582559518637,
"loss": 0.7941,
"step": 553
},
{
"epoch": 0.57,
"grad_norm": 0.0645156055688858,
"learning_rate": 0.00018309797771957036,
"loss": 0.7385,
"step": 554
},
{
"epoch": 0.57,
"grad_norm": 0.06690391153097153,
"learning_rate": 0.00018303760176509234,
"loss": 0.7509,
"step": 555
},
{
"epoch": 0.57,
"grad_norm": 0.06634744256734848,
"learning_rate": 0.00018297712815943109,
"loss": 0.6547,
"step": 556
},
{
"epoch": 0.57,
"grad_norm": 0.06777124106884003,
"learning_rate": 0.00018291655697370276,
"loss": 0.7103,
"step": 557
},
{
"epoch": 0.58,
"grad_norm": 0.06453371793031693,
"learning_rate": 0.00018285588827913834,
"loss": 0.7032,
"step": 558
},
{
"epoch": 0.58,
"grad_norm": 0.06689512729644775,
"learning_rate": 0.00018279512214708344,
"loss": 0.7677,
"step": 559
},
{
"epoch": 0.58,
"grad_norm": 0.06096369028091431,
"learning_rate": 0.00018273425864899829,
"loss": 0.5788,
"step": 560
},
{
"epoch": 0.58,
"grad_norm": 0.06912747025489807,
"learning_rate": 0.0001826732978564576,
"loss": 0.7094,
"step": 561
},
{
"epoch": 0.58,
"grad_norm": 0.06365373730659485,
"learning_rate": 0.0001826122398411505,
"loss": 0.6554,
"step": 562
},
{
"epoch": 0.58,
"grad_norm": 0.06549493223428726,
"learning_rate": 0.00018255108467488045,
"loss": 0.6927,
"step": 563
},
{
"epoch": 0.58,
"grad_norm": 0.06063194200396538,
"learning_rate": 0.00018248983242956515,
"loss": 0.7077,
"step": 564
},
{
"epoch": 0.58,
"grad_norm": 0.06569315493106842,
"learning_rate": 0.00018242848317723647,
"loss": 0.704,
"step": 565
},
{
"epoch": 0.58,
"grad_norm": 0.0646178349852562,
"learning_rate": 0.0001823670369900404,
"loss": 0.6658,
"step": 566
},
{
"epoch": 0.58,
"grad_norm": 0.06432666629552841,
"learning_rate": 0.0001823054939402369,
"loss": 0.7117,
"step": 567
},
{
"epoch": 0.59,
"grad_norm": 0.06953758746385574,
"learning_rate": 0.00018224385410019976,
"loss": 0.657,
"step": 568
},
{
"epoch": 0.59,
"grad_norm": 0.0662669911980629,
"learning_rate": 0.00018218211754241666,
"loss": 0.7467,
"step": 569
},
{
"epoch": 0.59,
"grad_norm": 0.06994238495826721,
"learning_rate": 0.00018212028433948915,
"loss": 0.771,
"step": 570
},
{
"epoch": 0.59,
"grad_norm": 0.06679581850767136,
"learning_rate": 0.0001820583545641322,
"loss": 0.7267,
"step": 571
},
{
"epoch": 0.59,
"grad_norm": 0.07098355144262314,
"learning_rate": 0.00018199632828917446,
"loss": 0.7201,
"step": 572
},
{
"epoch": 0.59,
"grad_norm": 0.06492327153682709,
"learning_rate": 0.00018193420558755807,
"loss": 0.7125,
"step": 573
},
{
"epoch": 0.59,
"grad_norm": 0.06887879967689514,
"learning_rate": 0.00018187198653233855,
"loss": 0.6887,
"step": 574
},
{
"epoch": 0.59,
"grad_norm": 0.06757469475269318,
"learning_rate": 0.00018180967119668472,
"loss": 0.6571,
"step": 575
},
{
"epoch": 0.59,
"grad_norm": 0.06290429830551147,
"learning_rate": 0.00018174725965387867,
"loss": 0.702,
"step": 576
},
{
"epoch": 0.59,
"grad_norm": 0.06352391839027405,
"learning_rate": 0.00018168475197731553,
"loss": 0.7017,
"step": 577
},
{
"epoch": 0.6,
"grad_norm": 0.06685255467891693,
"learning_rate": 0.00018162214824050366,
"loss": 0.7533,
"step": 578
},
{
"epoch": 0.6,
"grad_norm": 0.06117778643965721,
"learning_rate": 0.00018155944851706415,
"loss": 0.72,
"step": 579
},
{
"epoch": 0.6,
"grad_norm": 0.0662195160984993,
"learning_rate": 0.00018149665288073115,
"loss": 0.7206,
"step": 580
},
{
"epoch": 0.6,
"grad_norm": 0.0689619854092598,
"learning_rate": 0.00018143376140535158,
"loss": 0.6461,
"step": 581
},
{
"epoch": 0.6,
"grad_norm": 0.06290092319250107,
"learning_rate": 0.00018137077416488496,
"loss": 0.7368,
"step": 582
},
{
"epoch": 0.6,
"grad_norm": 0.06402301788330078,
"learning_rate": 0.00018130769123340351,
"loss": 0.6917,
"step": 583
},
{
"epoch": 0.6,
"grad_norm": 0.07769487798213959,
"learning_rate": 0.00018124451268509203,
"loss": 0.7289,
"step": 584
},
{
"epoch": 0.6,
"grad_norm": 0.06879512220621109,
"learning_rate": 0.00018118123859424764,
"loss": 0.7492,
"step": 585
},
{
"epoch": 0.6,
"grad_norm": 0.07164003700017929,
"learning_rate": 0.00018111786903527994,
"loss": 0.6481,
"step": 586
},
{
"epoch": 0.61,
"grad_norm": 0.06826422363519669,
"learning_rate": 0.00018105440408271067,
"loss": 0.7569,
"step": 587
},
{
"epoch": 0.61,
"grad_norm": 0.07842717319726944,
"learning_rate": 0.00018099084381117386,
"loss": 0.73,
"step": 588
},
{
"epoch": 0.61,
"grad_norm": 0.06824488192796707,
"learning_rate": 0.0001809271882954156,
"loss": 0.6665,
"step": 589
},
{
"epoch": 0.61,
"grad_norm": 0.06883325427770615,
"learning_rate": 0.000180863437610294,
"loss": 0.6761,
"step": 590
},
{
"epoch": 0.61,
"grad_norm": 0.06425601989030838,
"learning_rate": 0.000180799591830779,
"loss": 0.6317,
"step": 591
},
{
"epoch": 0.61,
"grad_norm": 0.06907620280981064,
"learning_rate": 0.00018073565103195253,
"loss": 0.6956,
"step": 592
},
{
"epoch": 0.61,
"grad_norm": 0.06852835416793823,
"learning_rate": 0.0001806716152890081,
"loss": 0.7079,
"step": 593
},
{
"epoch": 0.61,
"grad_norm": 0.07164844870567322,
"learning_rate": 0.00018060748467725097,
"loss": 0.7305,
"step": 594
},
{
"epoch": 0.61,
"grad_norm": 0.06715549528598785,
"learning_rate": 0.00018054325927209795,
"loss": 0.6846,
"step": 595
},
{
"epoch": 0.61,
"grad_norm": 0.06505031883716583,
"learning_rate": 0.00018047893914907733,
"loss": 0.7758,
"step": 596
},
{
"epoch": 0.62,
"grad_norm": 0.06611162424087524,
"learning_rate": 0.0001804145243838287,
"loss": 0.7493,
"step": 597
},
{
"epoch": 0.62,
"grad_norm": 0.06780173629522324,
"learning_rate": 0.00018035001505210308,
"loss": 0.7347,
"step": 598
},
{
"epoch": 0.62,
"grad_norm": 0.06760706007480621,
"learning_rate": 0.00018028541122976264,
"loss": 0.7149,
"step": 599
},
{
"epoch": 0.62,
"grad_norm": 0.06549396365880966,
"learning_rate": 0.00018022071299278065,
"loss": 0.7057,
"step": 600
},
{
"epoch": 0.62,
"grad_norm": 0.06889012455940247,
"learning_rate": 0.00018015592041724143,
"loss": 0.7686,
"step": 601
},
{
"epoch": 0.62,
"grad_norm": 0.06646202504634857,
"learning_rate": 0.00018009103357934024,
"loss": 0.7379,
"step": 602
},
{
"epoch": 0.62,
"grad_norm": 0.06753333657979965,
"learning_rate": 0.00018002605255538324,
"loss": 0.6518,
"step": 603
},
{
"epoch": 0.62,
"grad_norm": 0.06427349895238876,
"learning_rate": 0.0001799609774217872,
"loss": 0.6816,
"step": 604
},
{
"epoch": 0.62,
"grad_norm": 0.0671876072883606,
"learning_rate": 0.00017989580825507974,
"loss": 0.6166,
"step": 605
},
{
"epoch": 0.62,
"grad_norm": 0.06923870742321014,
"learning_rate": 0.00017983054513189898,
"loss": 0.7363,
"step": 606
},
{
"epoch": 0.63,
"grad_norm": 0.06566382944583893,
"learning_rate": 0.00017976518812899353,
"loss": 0.6905,
"step": 607
},
{
"epoch": 0.63,
"grad_norm": 0.06720302253961563,
"learning_rate": 0.0001796997373232224,
"loss": 0.7466,
"step": 608
},
{
"epoch": 0.63,
"grad_norm": 0.07467524707317352,
"learning_rate": 0.00017963419279155493,
"loss": 0.6771,
"step": 609
},
{
"epoch": 0.63,
"grad_norm": 0.06185929477214813,
"learning_rate": 0.00017956855461107068,
"loss": 0.7175,
"step": 610
},
{
"epoch": 0.63,
"grad_norm": 0.08537810295820236,
"learning_rate": 0.0001795028228589593,
"loss": 0.7694,
"step": 611
},
{
"epoch": 0.63,
"grad_norm": 0.06563472747802734,
"learning_rate": 0.00017943699761252055,
"loss": 0.7425,
"step": 612
},
{
"epoch": 0.63,
"grad_norm": 0.06966511905193329,
"learning_rate": 0.00017937107894916407,
"loss": 0.8045,
"step": 613
},
{
"epoch": 0.63,
"grad_norm": 0.0687708631157875,
"learning_rate": 0.0001793050669464094,
"loss": 0.7076,
"step": 614
},
{
"epoch": 0.63,
"grad_norm": 0.06829386204481125,
"learning_rate": 0.00017923896168188584,
"loss": 0.6838,
"step": 615
},
{
"epoch": 0.64,
"grad_norm": 0.06806325167417526,
"learning_rate": 0.0001791727632333323,
"loss": 0.6726,
"step": 616
},
{
"epoch": 0.64,
"grad_norm": 0.06878352910280228,
"learning_rate": 0.00017910647167859741,
"loss": 0.7273,
"step": 617
},
{
"epoch": 0.64,
"grad_norm": 0.07277524471282959,
"learning_rate": 0.00017904008709563915,
"loss": 0.6682,
"step": 618
},
{
"epoch": 0.64,
"grad_norm": 0.06648612022399902,
"learning_rate": 0.000178973609562525,
"loss": 0.7046,
"step": 619
},
{
"epoch": 0.64,
"grad_norm": 0.06936463713645935,
"learning_rate": 0.00017890703915743168,
"loss": 0.8204,
"step": 620
},
{
"epoch": 0.64,
"grad_norm": 0.07113652676343918,
"learning_rate": 0.00017884037595864517,
"loss": 0.7009,
"step": 621
},
{
"epoch": 0.64,
"grad_norm": 0.07027497887611389,
"learning_rate": 0.00017877362004456058,
"loss": 0.7249,
"step": 622
},
{
"epoch": 0.64,
"grad_norm": 0.0664186030626297,
"learning_rate": 0.000178706771493682,
"loss": 0.7165,
"step": 623
},
{
"epoch": 0.64,
"grad_norm": 0.06796207278966904,
"learning_rate": 0.0001786398303846225,
"loss": 0.7544,
"step": 624
},
{
"epoch": 0.64,
"grad_norm": 0.06871851533651352,
"learning_rate": 0.00017857279679610397,
"loss": 0.7431,
"step": 625
},
{
"epoch": 0.65,
"grad_norm": 0.07132145762443542,
"learning_rate": 0.0001785056708069571,
"loss": 0.7186,
"step": 626
},
{
"epoch": 0.65,
"grad_norm": 0.0689890906214714,
"learning_rate": 0.00017843845249612122,
"loss": 0.7305,
"step": 627
},
{
"epoch": 0.65,
"grad_norm": 0.06329981237649918,
"learning_rate": 0.0001783711419426442,
"loss": 0.6951,
"step": 628
},
{
"epoch": 0.65,
"grad_norm": 0.06915485858917236,
"learning_rate": 0.00017830373922568245,
"loss": 0.6136,
"step": 629
},
{
"epoch": 0.65,
"grad_norm": 0.07033320516347885,
"learning_rate": 0.00017823624442450065,
"loss": 0.7022,
"step": 630
},
{
"epoch": 0.65,
"grad_norm": 0.07259360700845718,
"learning_rate": 0.00017816865761847197,
"loss": 0.7394,
"step": 631
},
{
"epoch": 0.65,
"grad_norm": 0.06609155982732773,
"learning_rate": 0.0001781009788870775,
"loss": 0.6955,
"step": 632
},
{
"epoch": 0.65,
"grad_norm": 0.07294370979070663,
"learning_rate": 0.00017803320830990667,
"loss": 0.6823,
"step": 633
},
{
"epoch": 0.65,
"grad_norm": 0.0686420202255249,
"learning_rate": 0.00017796534596665686,
"loss": 0.6778,
"step": 634
},
{
"epoch": 0.65,
"grad_norm": 0.06810775399208069,
"learning_rate": 0.00017789739193713324,
"loss": 0.7313,
"step": 635
},
{
"epoch": 0.66,
"grad_norm": 0.06998413801193237,
"learning_rate": 0.000177829346301249,
"loss": 0.6917,
"step": 636
},
{
"epoch": 0.66,
"grad_norm": 0.08018749952316284,
"learning_rate": 0.00017776120913902489,
"loss": 0.7806,
"step": 637
},
{
"epoch": 0.66,
"grad_norm": 0.07463950663805008,
"learning_rate": 0.0001776929805305894,
"loss": 0.7387,
"step": 638
},
{
"epoch": 0.66,
"grad_norm": 0.06867985427379608,
"learning_rate": 0.0001776246605561785,
"loss": 0.7435,
"step": 639
},
{
"epoch": 0.66,
"grad_norm": 0.07185962051153183,
"learning_rate": 0.00017755624929613565,
"loss": 0.7688,
"step": 640
},
{
"epoch": 0.66,
"grad_norm": 0.0686650425195694,
"learning_rate": 0.00017748774683091164,
"loss": 0.7115,
"step": 641
},
{
"epoch": 0.66,
"grad_norm": 0.06798288226127625,
"learning_rate": 0.00017741915324106445,
"loss": 0.7755,
"step": 642
},
{
"epoch": 0.66,
"grad_norm": 0.07337713241577148,
"learning_rate": 0.0001773504686072594,
"loss": 0.7563,
"step": 643
},
{
"epoch": 0.66,
"grad_norm": 0.07286756485700607,
"learning_rate": 0.00017728169301026864,
"loss": 0.7242,
"step": 644
},
{
"epoch": 0.67,
"grad_norm": 0.0704449713230133,
"learning_rate": 0.00017721282653097148,
"loss": 0.7912,
"step": 645
},
{
"epoch": 0.67,
"grad_norm": 0.06909286230802536,
"learning_rate": 0.00017714386925035406,
"loss": 0.6845,
"step": 646
},
{
"epoch": 0.67,
"grad_norm": 0.07194187492132187,
"learning_rate": 0.00017707482124950923,
"loss": 0.7388,
"step": 647
},
{
"epoch": 0.67,
"grad_norm": 0.09377000480890274,
"learning_rate": 0.00017700568260963658,
"loss": 0.7118,
"step": 648
},
{
"epoch": 0.67,
"grad_norm": 0.06767406314611435,
"learning_rate": 0.00017693645341204236,
"loss": 0.6796,
"step": 649
},
{
"epoch": 0.67,
"grad_norm": 0.07329147309064865,
"learning_rate": 0.00017686713373813917,
"loss": 0.635,
"step": 650
},
{
"epoch": 0.67,
"grad_norm": 0.06956500560045242,
"learning_rate": 0.00017679772366944609,
"loss": 0.7316,
"step": 651
},
{
"epoch": 0.67,
"grad_norm": 0.07110968232154846,
"learning_rate": 0.00017672822328758853,
"loss": 0.7194,
"step": 652
},
{
"epoch": 0.67,
"grad_norm": 0.06750847399234772,
"learning_rate": 0.00017665863267429802,
"loss": 0.6878,
"step": 653
},
{
"epoch": 0.67,
"grad_norm": 0.0709410011768341,
"learning_rate": 0.00017658895191141225,
"loss": 0.7374,
"step": 654
},
{
"epoch": 0.68,
"grad_norm": 0.06910226494073868,
"learning_rate": 0.00017651918108087502,
"loss": 0.7234,
"step": 655
},
{
"epoch": 0.68,
"grad_norm": 0.07859506458044052,
"learning_rate": 0.00017644932026473584,
"loss": 0.7187,
"step": 656
},
{
"epoch": 0.68,
"grad_norm": 0.06830095499753952,
"learning_rate": 0.00017637936954515023,
"loss": 0.6305,
"step": 657
},
{
"epoch": 0.68,
"grad_norm": 0.07022988796234131,
"learning_rate": 0.00017630932900437936,
"loss": 0.6217,
"step": 658
},
{
"epoch": 0.68,
"grad_norm": 0.06744571030139923,
"learning_rate": 0.00017623919872479,
"loss": 0.6997,
"step": 659
},
{
"epoch": 0.68,
"grad_norm": 0.0686475932598114,
"learning_rate": 0.00017616897878885453,
"loss": 0.6887,
"step": 660
},
{
"epoch": 0.68,
"grad_norm": 0.06921619921922684,
"learning_rate": 0.00017609866927915065,
"loss": 0.695,
"step": 661
},
{
"epoch": 0.68,
"grad_norm": 0.07123583555221558,
"learning_rate": 0.00017602827027836152,
"loss": 0.7236,
"step": 662
},
{
"epoch": 0.68,
"grad_norm": 0.0664835125207901,
"learning_rate": 0.00017595778186927546,
"loss": 0.692,
"step": 663
},
{
"epoch": 0.68,
"grad_norm": 0.06954433768987656,
"learning_rate": 0.00017588720413478596,
"loss": 0.68,
"step": 664
},
{
"epoch": 0.69,
"grad_norm": 0.06957016885280609,
"learning_rate": 0.00017581653715789157,
"loss": 0.7483,
"step": 665
},
{
"epoch": 0.69,
"grad_norm": 0.07063329964876175,
"learning_rate": 0.00017574578102169577,
"loss": 0.7072,
"step": 666
},
{
"epoch": 0.69,
"grad_norm": 0.07582032680511475,
"learning_rate": 0.0001756749358094069,
"loss": 0.7581,
"step": 667
},
{
"epoch": 0.69,
"grad_norm": 0.06948222219944,
"learning_rate": 0.00017560400160433802,
"loss": 0.7368,
"step": 668
},
{
"epoch": 0.69,
"grad_norm": 0.07247303426265717,
"learning_rate": 0.00017553297848990688,
"loss": 0.743,
"step": 669
},
{
"epoch": 0.69,
"grad_norm": 0.07067783176898956,
"learning_rate": 0.00017546186654963578,
"loss": 0.7624,
"step": 670
},
{
"epoch": 0.69,
"grad_norm": 0.07306205481290817,
"learning_rate": 0.0001753906658671515,
"loss": 0.7256,
"step": 671
},
{
"epoch": 0.69,
"grad_norm": 0.06867087632417679,
"learning_rate": 0.00017531937652618513,
"loss": 0.7204,
"step": 672
},
{
"epoch": 0.69,
"grad_norm": 0.07339783757925034,
"learning_rate": 0.000175247998610572,
"loss": 0.752,
"step": 673
},
{
"epoch": 0.7,
"grad_norm": 0.0657355785369873,
"learning_rate": 0.00017517653220425173,
"loss": 0.7191,
"step": 674
},
{
"epoch": 0.7,
"grad_norm": 0.067570261657238,
"learning_rate": 0.00017510497739126786,
"loss": 0.7149,
"step": 675
},
{
"epoch": 0.7,
"grad_norm": 0.0657983049750328,
"learning_rate": 0.000175033334255768,
"loss": 0.6925,
"step": 676
},
{
"epoch": 0.7,
"grad_norm": 0.07179775089025497,
"learning_rate": 0.00017496160288200358,
"loss": 0.7144,
"step": 677
},
{
"epoch": 0.7,
"grad_norm": 0.06727674603462219,
"learning_rate": 0.00017488978335432974,
"loss": 0.7607,
"step": 678
},
{
"epoch": 0.7,
"grad_norm": 0.0738164633512497,
"learning_rate": 0.00017481787575720543,
"loss": 0.7518,
"step": 679
},
{
"epoch": 0.7,
"grad_norm": 0.07027100026607513,
"learning_rate": 0.00017474588017519303,
"loss": 0.8125,
"step": 680
},
{
"epoch": 0.7,
"grad_norm": 0.06803075224161148,
"learning_rate": 0.00017467379669295846,
"loss": 0.6404,
"step": 681
},
{
"epoch": 0.7,
"grad_norm": 0.0729600042104721,
"learning_rate": 0.00017460162539527103,
"loss": 0.7422,
"step": 682
},
{
"epoch": 0.7,
"grad_norm": 0.06913101673126221,
"learning_rate": 0.0001745293663670032,
"loss": 0.695,
"step": 683
},
{
"epoch": 0.71,
"grad_norm": 0.064353346824646,
"learning_rate": 0.0001744570196931308,
"loss": 0.7335,
"step": 684
},
{
"epoch": 0.71,
"grad_norm": 0.06986693292856216,
"learning_rate": 0.00017438458545873252,
"loss": 0.7419,
"step": 685
},
{
"epoch": 0.71,
"grad_norm": 0.07011342793703079,
"learning_rate": 0.00017431206374899017,
"loss": 0.6893,
"step": 686
},
{
"epoch": 0.71,
"grad_norm": 0.06858572363853455,
"learning_rate": 0.00017423945464918833,
"loss": 0.6907,
"step": 687
},
{
"epoch": 0.71,
"grad_norm": 0.08274766802787781,
"learning_rate": 0.00017416675824471448,
"loss": 0.7663,
"step": 688
},
{
"epoch": 0.71,
"grad_norm": 0.07006780803203583,
"learning_rate": 0.00017409397462105858,
"loss": 0.7254,
"step": 689
},
{
"epoch": 0.71,
"grad_norm": 0.07336114346981049,
"learning_rate": 0.0001740211038638133,
"loss": 0.6293,
"step": 690
},
{
"epoch": 0.71,
"grad_norm": 0.07286055386066437,
"learning_rate": 0.00017394814605867378,
"loss": 0.6905,
"step": 691
},
{
"epoch": 0.71,
"grad_norm": 0.06696517765522003,
"learning_rate": 0.00017387510129143746,
"loss": 0.7297,
"step": 692
},
{
"epoch": 0.71,
"grad_norm": 0.06974443048238754,
"learning_rate": 0.00017380196964800405,
"loss": 0.7166,
"step": 693
},
{
"epoch": 0.72,
"grad_norm": 0.06771736592054367,
"learning_rate": 0.0001737287512143755,
"loss": 0.7195,
"step": 694
},
{
"epoch": 0.72,
"grad_norm": 0.06780609488487244,
"learning_rate": 0.00017365544607665575,
"loss": 0.6517,
"step": 695
},
{
"epoch": 0.72,
"grad_norm": 0.06737010926008224,
"learning_rate": 0.0001735820543210507,
"loss": 0.6878,
"step": 696
},
{
"epoch": 0.72,
"grad_norm": 0.07415246218442917,
"learning_rate": 0.00017350857603386815,
"loss": 0.7592,
"step": 697
},
{
"epoch": 0.72,
"grad_norm": 0.06827431917190552,
"learning_rate": 0.0001734350113015177,
"loss": 0.7171,
"step": 698
},
{
"epoch": 0.72,
"grad_norm": 0.07075396925210953,
"learning_rate": 0.0001733613602105105,
"loss": 0.7731,
"step": 699
},
{
"epoch": 0.72,
"grad_norm": 0.0783689022064209,
"learning_rate": 0.00017328762284745937,
"loss": 0.6919,
"step": 700
},
{
"epoch": 0.72,
"grad_norm": 0.06844034790992737,
"learning_rate": 0.00017321379929907845,
"loss": 0.6868,
"step": 701
},
{
"epoch": 0.72,
"grad_norm": 0.07113159447908401,
"learning_rate": 0.00017313988965218335,
"loss": 0.7393,
"step": 702
},
{
"epoch": 0.72,
"grad_norm": 0.06837478280067444,
"learning_rate": 0.00017306589399369092,
"loss": 0.7923,
"step": 703
},
{
"epoch": 0.73,
"grad_norm": 0.06957865506410599,
"learning_rate": 0.00017299181241061908,
"loss": 0.6984,
"step": 704
},
{
"epoch": 0.73,
"grad_norm": 0.07044649869203568,
"learning_rate": 0.0001729176449900869,
"loss": 0.6994,
"step": 705
},
{
"epoch": 0.73,
"grad_norm": 0.06768269091844559,
"learning_rate": 0.00017284339181931427,
"loss": 0.6794,
"step": 706
},
{
"epoch": 0.73,
"grad_norm": 0.07598036527633667,
"learning_rate": 0.00017276905298562207,
"loss": 0.6755,
"step": 707
},
{
"epoch": 0.73,
"grad_norm": 0.07155588269233704,
"learning_rate": 0.00017269462857643183,
"loss": 0.7878,
"step": 708
},
{
"epoch": 0.73,
"grad_norm": 0.07248366624116898,
"learning_rate": 0.00017262011867926567,
"loss": 0.6732,
"step": 709
},
{
"epoch": 0.73,
"grad_norm": 0.07186141610145569,
"learning_rate": 0.00017254552338174633,
"loss": 0.6999,
"step": 710
},
{
"epoch": 0.73,
"grad_norm": 0.07100434601306915,
"learning_rate": 0.00017247084277159696,
"loss": 0.7418,
"step": 711
},
{
"epoch": 0.73,
"grad_norm": 0.06756517291069031,
"learning_rate": 0.000172396076936641,
"loss": 0.6875,
"step": 712
},
{
"epoch": 0.74,
"grad_norm": 0.06727565079927444,
"learning_rate": 0.00017232122596480217,
"loss": 0.7311,
"step": 713
},
{
"epoch": 0.74,
"grad_norm": 0.06914009153842926,
"learning_rate": 0.00017224628994410427,
"loss": 0.7064,
"step": 714
},
{
"epoch": 0.74,
"grad_norm": 0.06864607334136963,
"learning_rate": 0.0001721712689626711,
"loss": 0.7255,
"step": 715
},
{
"epoch": 0.74,
"grad_norm": 0.07154612243175507,
"learning_rate": 0.00017209616310872642,
"loss": 0.7724,
"step": 716
},
{
"epoch": 0.74,
"grad_norm": 0.07429131865501404,
"learning_rate": 0.00017202097247059382,
"loss": 0.6746,
"step": 717
},
{
"epoch": 0.74,
"grad_norm": 0.07139010727405548,
"learning_rate": 0.00017194569713669644,
"loss": 0.6725,
"step": 718
},
{
"epoch": 0.74,
"grad_norm": 0.07144646346569061,
"learning_rate": 0.00017187033719555722,
"loss": 0.7501,
"step": 719
},
{
"epoch": 0.74,
"grad_norm": 0.06815102696418762,
"learning_rate": 0.0001717948927357985,
"loss": 0.6723,
"step": 720
},
{
"epoch": 0.74,
"grad_norm": 0.06780737638473511,
"learning_rate": 0.00017171936384614202,
"loss": 0.6887,
"step": 721
},
{
"epoch": 0.74,
"grad_norm": 0.07604995369911194,
"learning_rate": 0.00017164375061540877,
"loss": 0.6663,
"step": 722
},
{
"epoch": 0.75,
"grad_norm": 0.07561536878347397,
"learning_rate": 0.00017156805313251904,
"loss": 0.7449,
"step": 723
},
{
"epoch": 0.75,
"grad_norm": 0.07025757431983948,
"learning_rate": 0.00017149227148649204,
"loss": 0.7432,
"step": 724
},
{
"epoch": 0.75,
"grad_norm": 0.07664772123098373,
"learning_rate": 0.0001714164057664461,
"loss": 0.751,
"step": 725
},
{
"epoch": 0.75,
"grad_norm": 0.07072637230157852,
"learning_rate": 0.00017134045606159837,
"loss": 0.7884,
"step": 726
},
{
"epoch": 0.75,
"grad_norm": 0.06859525293111801,
"learning_rate": 0.0001712644224612647,
"loss": 0.7368,
"step": 727
},
{
"epoch": 0.75,
"grad_norm": 0.06537380814552307,
"learning_rate": 0.00017118830505485967,
"loss": 0.716,
"step": 728
},
{
"epoch": 0.75,
"grad_norm": 0.06966748833656311,
"learning_rate": 0.00017111210393189644,
"loss": 0.7058,
"step": 729
},
{
"epoch": 0.75,
"eval_loss": 0.8200334310531616,
"eval_runtime": 111.8923,
"eval_samples_per_second": 88.818,
"eval_steps_per_second": 22.209,
"step": 729
},
{
"epoch": 0.75,
"grad_norm": 0.06748418509960175,
"learning_rate": 0.00017103581918198656,
"loss": 0.6189,
"step": 730
},
{
"epoch": 0.75,
"grad_norm": 0.0700392946600914,
"learning_rate": 0.0001709594508948399,
"loss": 0.671,
"step": 731
},
{
"epoch": 0.75,
"grad_norm": 0.07225173711776733,
"learning_rate": 0.0001708829991602647,
"loss": 0.6932,
"step": 732
},
{
"epoch": 0.76,
"grad_norm": 0.07260441780090332,
"learning_rate": 0.00017080646406816716,
"loss": 0.6667,
"step": 733
},
{
"epoch": 0.76,
"grad_norm": 0.07659890502691269,
"learning_rate": 0.00017072984570855164,
"loss": 0.7625,
"step": 734
},
{
"epoch": 0.76,
"grad_norm": 0.07129763811826706,
"learning_rate": 0.0001706531441715203,
"loss": 0.6938,
"step": 735
},
{
"epoch": 0.76,
"grad_norm": 0.07404035329818726,
"learning_rate": 0.00017057635954727336,
"loss": 0.7061,
"step": 736
},
{
"epoch": 0.76,
"grad_norm": 0.07175761461257935,
"learning_rate": 0.00017049949192610844,
"loss": 0.7886,
"step": 737
},
{
"epoch": 0.76,
"grad_norm": 0.06855437159538269,
"learning_rate": 0.00017042254139842088,
"loss": 0.7088,
"step": 738
},
{
"epoch": 0.76,
"grad_norm": 0.07093068957328796,
"learning_rate": 0.00017034550805470366,
"loss": 0.6564,
"step": 739
},
{
"epoch": 0.76,
"grad_norm": 0.07080741226673126,
"learning_rate": 0.00017026839198554692,
"loss": 0.6722,
"step": 740
},
{
"epoch": 0.76,
"grad_norm": 0.0733497142791748,
"learning_rate": 0.0001701911932816383,
"loss": 0.7528,
"step": 741
},
{
"epoch": 0.77,
"grad_norm": 0.07160144299268723,
"learning_rate": 0.0001701139120337624,
"loss": 0.6859,
"step": 742
},
{
"epoch": 0.77,
"grad_norm": 0.07589136064052582,
"learning_rate": 0.00017003654833280108,
"loss": 0.7875,
"step": 743
},
{
"epoch": 0.77,
"grad_norm": 0.07213631272315979,
"learning_rate": 0.000169959102269733,
"loss": 0.6945,
"step": 744
},
{
"epoch": 0.77,
"grad_norm": 0.07278650999069214,
"learning_rate": 0.00016988157393563392,
"loss": 0.7683,
"step": 745
},
{
"epoch": 0.77,
"grad_norm": 0.0748310461640358,
"learning_rate": 0.00016980396342167607,
"loss": 0.7254,
"step": 746
},
{
"epoch": 0.77,
"grad_norm": 0.07376185059547424,
"learning_rate": 0.00016972627081912847,
"loss": 0.7203,
"step": 747
},
{
"epoch": 0.77,
"grad_norm": 0.07464928179979324,
"learning_rate": 0.00016964849621935665,
"loss": 0.7687,
"step": 748
},
{
"epoch": 0.77,
"grad_norm": 0.06802686303853989,
"learning_rate": 0.00016957063971382257,
"loss": 0.9044,
"step": 749
},
{
"epoch": 0.77,
"grad_norm": 0.07590179145336151,
"learning_rate": 0.00016949270139408453,
"loss": 0.7479,
"step": 750
},
{
"epoch": 0.77,
"grad_norm": 0.07608579099178314,
"learning_rate": 0.00016941468135179698,
"loss": 0.7025,
"step": 751
},
{
"epoch": 0.78,
"grad_norm": 0.07457372546195984,
"learning_rate": 0.00016933657967871057,
"loss": 0.7219,
"step": 752
},
{
"epoch": 0.78,
"grad_norm": 0.07248198240995407,
"learning_rate": 0.00016925839646667184,
"loss": 0.7408,
"step": 753
},
{
"epoch": 0.78,
"grad_norm": 0.07194027304649353,
"learning_rate": 0.00016918013180762333,
"loss": 0.7088,
"step": 754
},
{
"epoch": 0.78,
"grad_norm": 0.0747242271900177,
"learning_rate": 0.00016910178579360323,
"loss": 0.6723,
"step": 755
},
{
"epoch": 0.78,
"grad_norm": 0.07224127650260925,
"learning_rate": 0.00016902335851674553,
"loss": 0.7307,
"step": 756
},
{
"epoch": 0.78,
"grad_norm": 0.08432527631521225,
"learning_rate": 0.00016894485006927973,
"loss": 0.6041,
"step": 757
},
{
"epoch": 0.78,
"grad_norm": 0.06535546481609344,
"learning_rate": 0.00016886626054353073,
"loss": 0.7007,
"step": 758
},
{
"epoch": 0.78,
"grad_norm": 0.0744195505976677,
"learning_rate": 0.00016878759003191888,
"loss": 0.7543,
"step": 759
},
{
"epoch": 0.78,
"grad_norm": 0.0689319595694542,
"learning_rate": 0.0001687088386269597,
"loss": 0.6784,
"step": 760
},
{
"epoch": 0.78,
"grad_norm": 0.06821681559085846,
"learning_rate": 0.0001686300064212639,
"loss": 0.6312,
"step": 761
},
{
"epoch": 0.79,
"grad_norm": 0.07716275751590729,
"learning_rate": 0.00016855109350753707,
"loss": 0.7033,
"step": 762
},
{
"epoch": 0.79,
"grad_norm": 0.07511652261018753,
"learning_rate": 0.00016847209997857996,
"loss": 0.7273,
"step": 763
},
{
"epoch": 0.79,
"grad_norm": 0.07603703439235687,
"learning_rate": 0.00016839302592728782,
"loss": 0.7864,
"step": 764
},
{
"epoch": 0.79,
"grad_norm": 0.0709453672170639,
"learning_rate": 0.00016831387144665088,
"loss": 0.663,
"step": 765
},
{
"epoch": 0.79,
"grad_norm": 0.07530734688043594,
"learning_rate": 0.0001682346366297537,
"loss": 0.6986,
"step": 766
},
{
"epoch": 0.79,
"grad_norm": 0.09247121959924698,
"learning_rate": 0.00016815532156977555,
"loss": 0.687,
"step": 767
},
{
"epoch": 0.79,
"grad_norm": 0.06963692605495453,
"learning_rate": 0.00016807592635998986,
"loss": 0.6596,
"step": 768
},
{
"epoch": 0.79,
"grad_norm": 0.0712541937828064,
"learning_rate": 0.00016799645109376446,
"loss": 0.7263,
"step": 769
},
{
"epoch": 0.79,
"grad_norm": 0.07108648121356964,
"learning_rate": 0.00016791689586456124,
"loss": 0.7563,
"step": 770
},
{
"epoch": 0.8,
"grad_norm": 0.07317429780960083,
"learning_rate": 0.0001678372607659362,
"loss": 0.6971,
"step": 771
},
{
"epoch": 0.8,
"grad_norm": 0.07137763500213623,
"learning_rate": 0.00016775754589153913,
"loss": 0.6823,
"step": 772
},
{
"epoch": 0.8,
"grad_norm": 0.07120873034000397,
"learning_rate": 0.00016767775133511384,
"loss": 0.6653,
"step": 773
},
{
"epoch": 0.8,
"grad_norm": 0.07151015102863312,
"learning_rate": 0.00016759787719049766,
"loss": 0.655,
"step": 774
},
{
"epoch": 0.8,
"grad_norm": 0.07391924411058426,
"learning_rate": 0.00016751792355162163,
"loss": 0.7302,
"step": 775
},
{
"epoch": 0.8,
"grad_norm": 0.07449232786893845,
"learning_rate": 0.0001674378905125102,
"loss": 0.734,
"step": 776
},
{
"epoch": 0.8,
"grad_norm": 0.06854017078876495,
"learning_rate": 0.0001673577781672812,
"loss": 0.7211,
"step": 777
},
{
"epoch": 0.8,
"grad_norm": 0.07284322381019592,
"learning_rate": 0.00016727758661014587,
"loss": 0.6797,
"step": 778
},
{
"epoch": 0.8,
"grad_norm": 0.07225552201271057,
"learning_rate": 0.00016719731593540832,
"loss": 0.6995,
"step": 779
},
{
"epoch": 0.8,
"grad_norm": 0.07095993310213089,
"learning_rate": 0.00016711696623746596,
"loss": 0.7113,
"step": 780
},
{
"epoch": 0.81,
"grad_norm": 0.07299510389566422,
"learning_rate": 0.000167036537610809,
"loss": 0.6578,
"step": 781
},
{
"epoch": 0.81,
"grad_norm": 0.07212400436401367,
"learning_rate": 0.0001669560301500205,
"loss": 0.6849,
"step": 782
},
{
"epoch": 0.81,
"grad_norm": 0.07263436913490295,
"learning_rate": 0.00016687544394977625,
"loss": 0.7493,
"step": 783
},
{
"epoch": 0.81,
"grad_norm": 0.06992338597774506,
"learning_rate": 0.00016679477910484463,
"loss": 0.7447,
"step": 784
},
{
"epoch": 0.81,
"grad_norm": 0.07319390028715134,
"learning_rate": 0.00016671403571008643,
"loss": 0.6864,
"step": 785
},
{
"epoch": 0.81,
"grad_norm": 0.07034803926944733,
"learning_rate": 0.0001666332138604549,
"loss": 0.6968,
"step": 786
},
{
"epoch": 0.81,
"grad_norm": 0.06971286237239838,
"learning_rate": 0.00016655231365099557,
"loss": 0.642,
"step": 787
},
{
"epoch": 0.81,
"grad_norm": 0.07530028373003006,
"learning_rate": 0.000166471335176846,
"loss": 0.64,
"step": 788
},
{
"epoch": 0.81,
"grad_norm": 0.06948436051607132,
"learning_rate": 0.00016639027853323596,
"loss": 0.7564,
"step": 789
},
{
"epoch": 0.81,
"grad_norm": 0.07382038235664368,
"learning_rate": 0.00016630914381548695,
"loss": 0.8079,
"step": 790
},
{
"epoch": 0.82,
"grad_norm": 0.08063971996307373,
"learning_rate": 0.00016622793111901245,
"loss": 0.6679,
"step": 791
},
{
"epoch": 0.82,
"grad_norm": 0.065667524933815,
"learning_rate": 0.00016614664053931757,
"loss": 0.6448,
"step": 792
},
{
"epoch": 0.82,
"grad_norm": 0.07058931887149811,
"learning_rate": 0.00016606527217199899,
"loss": 0.6972,
"step": 793
},
{
"epoch": 0.82,
"grad_norm": 0.06650307774543762,
"learning_rate": 0.00016598382611274492,
"loss": 0.6352,
"step": 794
},
{
"epoch": 0.82,
"grad_norm": 0.0777771845459938,
"learning_rate": 0.0001659023024573349,
"loss": 0.6967,
"step": 795
},
{
"epoch": 0.82,
"grad_norm": 0.07293581962585449,
"learning_rate": 0.00016582070130163973,
"loss": 0.7427,
"step": 796
},
{
"epoch": 0.82,
"grad_norm": 0.0743393748998642,
"learning_rate": 0.00016573902274162134,
"loss": 0.7339,
"step": 797
},
{
"epoch": 0.82,
"grad_norm": 0.07685486972332001,
"learning_rate": 0.00016565726687333275,
"loss": 0.7306,
"step": 798
},
{
"epoch": 0.82,
"grad_norm": 0.07364094257354736,
"learning_rate": 0.00016557543379291776,
"loss": 0.7842,
"step": 799
},
{
"epoch": 0.82,
"grad_norm": 0.07045711576938629,
"learning_rate": 0.00016549352359661112,
"loss": 0.6929,
"step": 800
},
{
"epoch": 0.83,
"grad_norm": 0.07396701723337173,
"learning_rate": 0.00016541153638073816,
"loss": 0.7083,
"step": 801
},
{
"epoch": 0.83,
"grad_norm": 0.07448946684598923,
"learning_rate": 0.0001653294722417148,
"loss": 0.6738,
"step": 802
},
{
"epoch": 0.83,
"grad_norm": 0.07139252871274948,
"learning_rate": 0.00016524733127604754,
"loss": 0.7071,
"step": 803
},
{
"epoch": 0.83,
"grad_norm": 0.07060480862855911,
"learning_rate": 0.000165165113580333,
"loss": 0.6935,
"step": 804
},
{
"epoch": 0.83,
"grad_norm": 0.0778721421957016,
"learning_rate": 0.0001650828192512583,
"loss": 0.7127,
"step": 805
},
{
"epoch": 0.83,
"grad_norm": 0.07079839706420898,
"learning_rate": 0.00016500044838560043,
"loss": 0.7437,
"step": 806
},
{
"epoch": 0.83,
"grad_norm": 0.07176418602466583,
"learning_rate": 0.00016491800108022657,
"loss": 0.6958,
"step": 807
},
{
"epoch": 0.83,
"grad_norm": 0.07123876363039017,
"learning_rate": 0.00016483547743209367,
"loss": 0.6034,
"step": 808
},
{
"epoch": 0.83,
"grad_norm": 0.07393837720155716,
"learning_rate": 0.00016475287753824853,
"loss": 0.7054,
"step": 809
},
{
"epoch": 0.84,
"grad_norm": 0.07148581743240356,
"learning_rate": 0.00016467020149582763,
"loss": 0.7549,
"step": 810
},
{
"epoch": 0.84,
"grad_norm": 0.07241423428058624,
"learning_rate": 0.0001645874494020569,
"loss": 0.6807,
"step": 811
},
{
"epoch": 0.84,
"grad_norm": 0.067020945250988,
"learning_rate": 0.00016450462135425187,
"loss": 0.6689,
"step": 812
},
{
"epoch": 0.84,
"grad_norm": 0.07124791294336319,
"learning_rate": 0.00016442171744981713,
"loss": 0.7972,
"step": 813
},
{
"epoch": 0.84,
"grad_norm": 0.06952305883169174,
"learning_rate": 0.00016433873778624682,
"loss": 0.6847,
"step": 814
},
{
"epoch": 0.84,
"grad_norm": 0.07224664092063904,
"learning_rate": 0.00016425568246112383,
"loss": 0.7467,
"step": 815
},
{
"epoch": 0.84,
"grad_norm": 0.07566121965646744,
"learning_rate": 0.00016417255157212033,
"loss": 0.6625,
"step": 816
},
{
"epoch": 0.84,
"grad_norm": 0.07257838547229767,
"learning_rate": 0.00016408934521699707,
"loss": 0.6801,
"step": 817
},
{
"epoch": 0.84,
"grad_norm": 0.06923528760671616,
"learning_rate": 0.00016400606349360375,
"loss": 0.6777,
"step": 818
},
{
"epoch": 0.84,
"grad_norm": 0.07393685728311539,
"learning_rate": 0.0001639227064998787,
"loss": 0.654,
"step": 819
},
{
"epoch": 0.85,
"grad_norm": 0.07075174897909164,
"learning_rate": 0.00016383927433384857,
"loss": 0.7084,
"step": 820
},
{
"epoch": 0.85,
"grad_norm": 0.07403064519166946,
"learning_rate": 0.00016375576709362868,
"loss": 0.6926,
"step": 821
},
{
"epoch": 0.85,
"grad_norm": 0.07139717042446136,
"learning_rate": 0.00016367218487742239,
"loss": 0.7008,
"step": 822
},
{
"epoch": 0.85,
"grad_norm": 0.07197099924087524,
"learning_rate": 0.00016358852778352142,
"loss": 0.758,
"step": 823
},
{
"epoch": 0.85,
"grad_norm": 0.07550374418497086,
"learning_rate": 0.0001635047959103054,
"loss": 0.7896,
"step": 824
},
{
"epoch": 0.85,
"grad_norm": 0.09666828066110611,
"learning_rate": 0.00016342098935624204,
"loss": 0.6539,
"step": 825
},
{
"epoch": 0.85,
"grad_norm": 0.06977133452892303,
"learning_rate": 0.00016333710821988677,
"loss": 0.7262,
"step": 826
},
{
"epoch": 0.85,
"grad_norm": 0.06979740411043167,
"learning_rate": 0.00016325315259988274,
"loss": 0.7076,
"step": 827
},
{
"epoch": 0.85,
"grad_norm": 0.06903263181447983,
"learning_rate": 0.00016316912259496075,
"loss": 0.7015,
"step": 828
},
{
"epoch": 0.85,
"grad_norm": 0.07616313546895981,
"learning_rate": 0.000163085018303939,
"loss": 0.7447,
"step": 829
},
{
"epoch": 0.86,
"grad_norm": 0.07552159577608109,
"learning_rate": 0.00016300083982572313,
"loss": 0.7927,
"step": 830
},
{
"epoch": 0.86,
"grad_norm": 0.07039552181959152,
"learning_rate": 0.00016291658725930592,
"loss": 0.7102,
"step": 831
},
{
"epoch": 0.86,
"grad_norm": 0.0684932991862297,
"learning_rate": 0.00016283226070376737,
"loss": 0.6915,
"step": 832
},
{
"epoch": 0.86,
"grad_norm": 0.07504136115312576,
"learning_rate": 0.00016274786025827445,
"loss": 0.7161,
"step": 833
},
{
"epoch": 0.86,
"grad_norm": 0.07205895334482193,
"learning_rate": 0.0001626633860220811,
"loss": 0.751,
"step": 834
},
{
"epoch": 0.86,
"grad_norm": 0.07040917873382568,
"learning_rate": 0.00016257883809452786,
"loss": 0.6841,
"step": 835
},
{
"epoch": 0.86,
"grad_norm": 0.07174846529960632,
"learning_rate": 0.00016249421657504209,
"loss": 0.6553,
"step": 836
},
{
"epoch": 0.86,
"grad_norm": 0.08638732880353928,
"learning_rate": 0.0001624095215631376,
"loss": 0.6641,
"step": 837
},
{
"epoch": 0.86,
"grad_norm": 0.06990225613117218,
"learning_rate": 0.00016232475315841474,
"loss": 0.674,
"step": 838
},
{
"epoch": 0.87,
"grad_norm": 0.0807608887553215,
"learning_rate": 0.00016223991146056002,
"loss": 0.7398,
"step": 839
},
{
"epoch": 0.87,
"grad_norm": 0.0735122337937355,
"learning_rate": 0.0001621549965693463,
"loss": 0.7068,
"step": 840
},
{
"epoch": 0.87,
"grad_norm": 0.07527075707912445,
"learning_rate": 0.00016207000858463238,
"loss": 0.7135,
"step": 841
},
{
"epoch": 0.87,
"grad_norm": 0.07502257823944092,
"learning_rate": 0.00016198494760636303,
"loss": 0.8198,
"step": 842
},
{
"epoch": 0.87,
"grad_norm": 0.0750960186123848,
"learning_rate": 0.00016189981373456897,
"loss": 0.6907,
"step": 843
},
{
"epoch": 0.87,
"grad_norm": 0.06994899362325668,
"learning_rate": 0.00016181460706936654,
"loss": 0.7043,
"step": 844
},
{
"epoch": 0.87,
"grad_norm": 0.0717443972826004,
"learning_rate": 0.00016172932771095773,
"loss": 0.8281,
"step": 845
},
{
"epoch": 0.87,
"grad_norm": 0.06987167149782181,
"learning_rate": 0.00016164397575962997,
"loss": 0.7121,
"step": 846
},
{
"epoch": 0.87,
"grad_norm": 0.0782201811671257,
"learning_rate": 0.00016155855131575614,
"loss": 0.7374,
"step": 847
},
{
"epoch": 0.87,
"grad_norm": 0.07733765989542007,
"learning_rate": 0.00016147305447979427,
"loss": 0.7465,
"step": 848
},
{
"epoch": 0.88,
"grad_norm": 0.07451386749744415,
"learning_rate": 0.00016138748535228758,
"loss": 0.6913,
"step": 849
},
{
"epoch": 0.88,
"grad_norm": 0.07971774786710739,
"learning_rate": 0.00016130184403386432,
"loss": 0.6886,
"step": 850
},
{
"epoch": 0.88,
"grad_norm": 0.07056500762701035,
"learning_rate": 0.0001612161306252376,
"loss": 0.6443,
"step": 851
},
{
"epoch": 0.88,
"grad_norm": 0.07071709632873535,
"learning_rate": 0.00016113034522720533,
"loss": 0.6901,
"step": 852
},
{
"epoch": 0.88,
"grad_norm": 0.0736258253455162,
"learning_rate": 0.00016104448794065004,
"loss": 0.7571,
"step": 853
},
{
"epoch": 0.88,
"grad_norm": 0.08299355953931808,
"learning_rate": 0.00016095855886653885,
"loss": 0.6946,
"step": 854
},
{
"epoch": 0.88,
"grad_norm": 0.0706866979598999,
"learning_rate": 0.00016087255810592327,
"loss": 0.7256,
"step": 855
},
{
"epoch": 0.88,
"grad_norm": 0.07274890691041946,
"learning_rate": 0.0001607864857599391,
"loss": 0.7055,
"step": 856
},
{
"epoch": 0.88,
"grad_norm": 0.07094918936491013,
"learning_rate": 0.00016070034192980638,
"loss": 0.6784,
"step": 857
},
{
"epoch": 0.88,
"grad_norm": 0.0819023996591568,
"learning_rate": 0.00016061412671682917,
"loss": 0.6779,
"step": 858
},
{
"epoch": 0.89,
"grad_norm": 0.07570182532072067,
"learning_rate": 0.00016052784022239547,
"loss": 0.7238,
"step": 859
},
{
"epoch": 0.89,
"grad_norm": 0.07302212715148926,
"learning_rate": 0.00016044148254797715,
"loss": 0.6488,
"step": 860
},
{
"epoch": 0.89,
"grad_norm": 0.06959427148103714,
"learning_rate": 0.00016035505379512975,
"loss": 0.683,
"step": 861
},
{
"epoch": 0.89,
"grad_norm": 0.07809410244226456,
"learning_rate": 0.00016026855406549238,
"loss": 0.6587,
"step": 862
},
{
"epoch": 0.89,
"grad_norm": 0.08332572132349014,
"learning_rate": 0.00016018198346078762,
"loss": 0.7443,
"step": 863
},
{
"epoch": 0.89,
"grad_norm": 0.07175584137439728,
"learning_rate": 0.00016009534208282148,
"loss": 0.6607,
"step": 864
},
{
"epoch": 0.89,
"grad_norm": 0.07386159151792526,
"learning_rate": 0.00016000863003348315,
"loss": 0.7377,
"step": 865
},
{
"epoch": 0.89,
"grad_norm": 0.07763604819774628,
"learning_rate": 0.0001599218474147448,
"loss": 0.7269,
"step": 866
},
{
"epoch": 0.89,
"grad_norm": 0.08228732645511627,
"learning_rate": 0.00015983499432866186,
"loss": 0.7337,
"step": 867
},
{
"epoch": 0.9,
"grad_norm": 0.0716525986790657,
"learning_rate": 0.0001597480708773724,
"loss": 0.6779,
"step": 868
},
{
"epoch": 0.9,
"grad_norm": 0.07602430880069733,
"learning_rate": 0.00015966107716309727,
"loss": 0.6689,
"step": 869
},
{
"epoch": 0.9,
"grad_norm": 0.07297798246145248,
"learning_rate": 0.00015957401328814007,
"loss": 0.7421,
"step": 870
},
{
"epoch": 0.9,
"grad_norm": 0.07451054453849792,
"learning_rate": 0.0001594868793548868,
"loss": 0.7841,
"step": 871
},
{
"epoch": 0.9,
"grad_norm": 0.0786144807934761,
"learning_rate": 0.0001593996754658059,
"loss": 0.6227,
"step": 872
},
{
"epoch": 0.9,
"grad_norm": 0.07301963865756989,
"learning_rate": 0.000159312401723448,
"loss": 0.7599,
"step": 873
},
{
"epoch": 0.9,
"grad_norm": 0.07685115933418274,
"learning_rate": 0.00015922505823044597,
"loss": 0.7269,
"step": 874
},
{
"epoch": 0.9,
"grad_norm": 0.08059655129909515,
"learning_rate": 0.0001591376450895147,
"loss": 0.7869,
"step": 875
},
{
"epoch": 0.9,
"grad_norm": 0.07008222490549088,
"learning_rate": 0.00015905016240345087,
"loss": 0.6547,
"step": 876
},
{
"epoch": 0.9,
"grad_norm": 0.0795569196343422,
"learning_rate": 0.0001589626102751331,
"loss": 0.7661,
"step": 877
},
{
"epoch": 0.91,
"grad_norm": 0.07241260260343552,
"learning_rate": 0.00015887498880752155,
"loss": 0.7443,
"step": 878
},
{
"epoch": 0.91,
"grad_norm": 0.07290566712617874,
"learning_rate": 0.000158787298103658,
"loss": 0.7087,
"step": 879
},
{
"epoch": 0.91,
"grad_norm": 0.07114351540803909,
"learning_rate": 0.0001586995382666656,
"loss": 0.7609,
"step": 880
},
{
"epoch": 0.91,
"grad_norm": 0.07909577339887619,
"learning_rate": 0.0001586117093997489,
"loss": 0.7302,
"step": 881
},
{
"epoch": 0.91,
"grad_norm": 0.06962461024522781,
"learning_rate": 0.00015852381160619343,
"loss": 0.7363,
"step": 882
},
{
"epoch": 0.91,
"grad_norm": 0.07440977543592453,
"learning_rate": 0.000158435844989366,
"loss": 0.696,
"step": 883
},
{
"epoch": 0.91,
"grad_norm": 0.0998484343290329,
"learning_rate": 0.0001583478096527142,
"loss": 0.7897,
"step": 884
},
{
"epoch": 0.91,
"grad_norm": 0.08223816752433777,
"learning_rate": 0.0001582597056997665,
"loss": 0.6683,
"step": 885
},
{
"epoch": 0.91,
"grad_norm": 0.07521655410528183,
"learning_rate": 0.00015817153323413206,
"loss": 0.7025,
"step": 886
},
{
"epoch": 0.91,
"grad_norm": 0.07854367047548294,
"learning_rate": 0.0001580832923595006,
"loss": 0.6931,
"step": 887
},
{
"epoch": 0.92,
"grad_norm": 0.07404212653636932,
"learning_rate": 0.00015799498317964225,
"loss": 0.7398,
"step": 888
},
{
"epoch": 0.92,
"grad_norm": 0.07481147348880768,
"learning_rate": 0.00015790660579840753,
"loss": 0.6826,
"step": 889
},
{
"epoch": 0.92,
"grad_norm": 0.07364954799413681,
"learning_rate": 0.00015781816031972717,
"loss": 0.6833,
"step": 890
},
{
"epoch": 0.92,
"grad_norm": 0.07147948443889618,
"learning_rate": 0.00015772964684761186,
"loss": 0.6816,
"step": 891
},
{
"epoch": 0.92,
"grad_norm": 0.07281176000833511,
"learning_rate": 0.00015764106548615242,
"loss": 0.7211,
"step": 892
},
{
"epoch": 0.92,
"grad_norm": 0.07681546360254288,
"learning_rate": 0.0001575524163395194,
"loss": 0.6958,
"step": 893
},
{
"epoch": 0.92,
"grad_norm": 0.07273909449577332,
"learning_rate": 0.0001574636995119631,
"loss": 0.7166,
"step": 894
},
{
"epoch": 0.92,
"grad_norm": 0.08109702169895172,
"learning_rate": 0.0001573749151078134,
"loss": 0.734,
"step": 895
},
{
"epoch": 0.92,
"grad_norm": 0.07343237847089767,
"learning_rate": 0.00015728606323147965,
"loss": 0.6613,
"step": 896
},
{
"epoch": 0.92,
"grad_norm": 0.06834150850772858,
"learning_rate": 0.0001571971439874505,
"loss": 0.6678,
"step": 897
},
{
"epoch": 0.93,
"grad_norm": 0.07291509956121445,
"learning_rate": 0.00015710815748029396,
"loss": 0.682,
"step": 898
},
{
"epoch": 0.93,
"grad_norm": 0.07433850318193436,
"learning_rate": 0.00015701910381465695,
"loss": 0.6787,
"step": 899
},
{
"epoch": 0.93,
"grad_norm": 0.06953968852758408,
"learning_rate": 0.00015692998309526556,
"loss": 0.6631,
"step": 900
},
{
"epoch": 0.93,
"grad_norm": 0.07962784171104431,
"learning_rate": 0.0001568407954269246,
"loss": 0.696,
"step": 901
},
{
"epoch": 0.93,
"grad_norm": 0.07209240645170212,
"learning_rate": 0.00015675154091451764,
"loss": 0.7159,
"step": 902
},
{
"epoch": 0.93,
"grad_norm": 0.0718846246600151,
"learning_rate": 0.00015666221966300693,
"loss": 0.7385,
"step": 903
},
{
"epoch": 0.93,
"grad_norm": 0.07707607001066208,
"learning_rate": 0.00015657283177743307,
"loss": 0.7408,
"step": 904
},
{
"epoch": 0.93,
"grad_norm": 0.0742068812251091,
"learning_rate": 0.00015648337736291515,
"loss": 0.7541,
"step": 905
},
{
"epoch": 0.93,
"grad_norm": 0.07085248082876205,
"learning_rate": 0.00015639385652465047,
"loss": 0.6785,
"step": 906
},
{
"epoch": 0.94,
"grad_norm": 0.07351714372634888,
"learning_rate": 0.00015630426936791433,
"loss": 0.7232,
"step": 907
},
{
"epoch": 0.94,
"grad_norm": 0.0779218003153801,
"learning_rate": 0.0001562146159980602,
"loss": 0.7737,
"step": 908
},
{
"epoch": 0.94,
"grad_norm": 0.07323803752660751,
"learning_rate": 0.00015612489652051928,
"loss": 0.6536,
"step": 909
},
{
"epoch": 0.94,
"grad_norm": 0.08168342709541321,
"learning_rate": 0.00015603511104080056,
"loss": 0.7553,
"step": 910
},
{
"epoch": 0.94,
"grad_norm": 0.08360809832811356,
"learning_rate": 0.00015594525966449062,
"loss": 0.6522,
"step": 911
},
{
"epoch": 0.94,
"grad_norm": 0.07412074506282806,
"learning_rate": 0.00015585534249725359,
"loss": 0.7589,
"step": 912
},
{
"epoch": 0.94,
"grad_norm": 0.07159864902496338,
"learning_rate": 0.00015576535964483094,
"loss": 0.6933,
"step": 913
},
{
"epoch": 0.94,
"grad_norm": 0.0748409703373909,
"learning_rate": 0.00015567531121304134,
"loss": 0.7603,
"step": 914
},
{
"epoch": 0.94,
"grad_norm": 0.08181232959032059,
"learning_rate": 0.00015558519730778068,
"loss": 0.6875,
"step": 915
},
{
"epoch": 0.94,
"grad_norm": 0.08348861336708069,
"learning_rate": 0.00015549501803502173,
"loss": 0.6878,
"step": 916
},
{
"epoch": 0.95,
"grad_norm": 0.07457895576953888,
"learning_rate": 0.00015540477350081423,
"loss": 0.7035,
"step": 917
},
{
"epoch": 0.95,
"grad_norm": 0.07142025232315063,
"learning_rate": 0.00015531446381128464,
"loss": 0.6809,
"step": 918
},
{
"epoch": 0.95,
"grad_norm": 0.07224272936582565,
"learning_rate": 0.00015522408907263596,
"loss": 0.6987,
"step": 919
},
{
"epoch": 0.95,
"grad_norm": 0.07358361780643463,
"learning_rate": 0.0001551336493911478,
"loss": 0.7223,
"step": 920
},
{
"epoch": 0.95,
"grad_norm": 0.07335005700588226,
"learning_rate": 0.00015504314487317612,
"loss": 0.6515,
"step": 921
},
{
"epoch": 0.95,
"grad_norm": 0.0723937451839447,
"learning_rate": 0.00015495257562515307,
"loss": 0.7698,
"step": 922
},
{
"epoch": 0.95,
"grad_norm": 0.06864594668149948,
"learning_rate": 0.00015486194175358696,
"loss": 0.6483,
"step": 923
},
{
"epoch": 0.95,
"grad_norm": 0.07635841518640518,
"learning_rate": 0.00015477124336506207,
"loss": 0.6139,
"step": 924
},
{
"epoch": 0.95,
"grad_norm": 0.08424652367830276,
"learning_rate": 0.0001546804805662386,
"loss": 0.6604,
"step": 925
},
{
"epoch": 0.95,
"grad_norm": 0.07373332232236862,
"learning_rate": 0.00015458965346385248,
"loss": 0.734,
"step": 926
},
{
"epoch": 0.96,
"grad_norm": 0.07332025468349457,
"learning_rate": 0.00015449876216471525,
"loss": 0.7765,
"step": 927
},
{
"epoch": 0.96,
"grad_norm": 0.07534317672252655,
"learning_rate": 0.00015440780677571388,
"loss": 0.7436,
"step": 928
},
{
"epoch": 0.96,
"grad_norm": 0.08624652773141861,
"learning_rate": 0.00015431678740381085,
"loss": 0.7335,
"step": 929
},
{
"epoch": 0.96,
"grad_norm": 0.07540034502744675,
"learning_rate": 0.00015422570415604377,
"loss": 0.6808,
"step": 930
},
{
"epoch": 0.96,
"grad_norm": 0.07505679130554199,
"learning_rate": 0.00015413455713952538,
"loss": 0.7086,
"step": 931
},
{
"epoch": 0.96,
"grad_norm": 0.08109767735004425,
"learning_rate": 0.0001540433464614435,
"loss": 0.8099,
"step": 932
},
{
"epoch": 0.96,
"grad_norm": 0.07563024014234543,
"learning_rate": 0.00015395207222906068,
"loss": 0.7273,
"step": 933
},
{
"epoch": 0.96,
"grad_norm": 0.07199395447969437,
"learning_rate": 0.00015386073454971432,
"loss": 0.7727,
"step": 934
},
{
"epoch": 0.96,
"grad_norm": 0.08543197810649872,
"learning_rate": 0.00015376933353081635,
"loss": 0.705,
"step": 935
},
{
"epoch": 0.97,
"grad_norm": 0.07872208952903748,
"learning_rate": 0.00015367786927985327,
"loss": 0.7509,
"step": 936
},
{
"epoch": 0.97,
"grad_norm": 0.0703720971941948,
"learning_rate": 0.00015358634190438592,
"loss": 0.7214,
"step": 937
},
{
"epoch": 0.97,
"grad_norm": 0.0741022378206253,
"learning_rate": 0.0001534947515120493,
"loss": 0.7421,
"step": 938
},
{
"epoch": 0.97,
"grad_norm": 0.07758384943008423,
"learning_rate": 0.00015340309821055255,
"loss": 0.7013,
"step": 939
},
{
"epoch": 0.97,
"grad_norm": 0.07203276455402374,
"learning_rate": 0.00015331138210767885,
"loss": 0.7268,
"step": 940
},
{
"epoch": 0.97,
"grad_norm": 0.09579942375421524,
"learning_rate": 0.0001532196033112852,
"loss": 0.6617,
"step": 941
},
{
"epoch": 0.97,
"grad_norm": 0.07915519922971725,
"learning_rate": 0.00015312776192930228,
"loss": 0.7223,
"step": 942
},
{
"epoch": 0.97,
"grad_norm": 0.07456015795469284,
"learning_rate": 0.00015303585806973445,
"loss": 0.5984,
"step": 943
},
{
"epoch": 0.97,
"grad_norm": 0.07331335544586182,
"learning_rate": 0.0001529438918406595,
"loss": 0.6881,
"step": 944
},
{
"epoch": 0.97,
"grad_norm": 0.08498119562864304,
"learning_rate": 0.00015285186335022854,
"loss": 0.7509,
"step": 945
},
{
"epoch": 0.98,
"grad_norm": 0.08055809885263443,
"learning_rate": 0.0001527597727066659,
"loss": 0.7643,
"step": 946
},
{
"epoch": 0.98,
"grad_norm": 0.07158636301755905,
"learning_rate": 0.00015266762001826911,
"loss": 0.6522,
"step": 947
},
{
"epoch": 0.98,
"grad_norm": 0.0752343162894249,
"learning_rate": 0.00015257540539340852,
"loss": 0.6749,
"step": 948
},
{
"epoch": 0.98,
"grad_norm": 0.07281875610351562,
"learning_rate": 0.0001524831289405274,
"loss": 0.6796,
"step": 949
},
{
"epoch": 0.98,
"grad_norm": 0.08019158244132996,
"learning_rate": 0.00015239079076814166,
"loss": 0.6577,
"step": 950
},
{
"epoch": 0.98,
"grad_norm": 0.07102995365858078,
"learning_rate": 0.00015229839098483992,
"loss": 0.7034,
"step": 951
},
{
"epoch": 0.98,
"grad_norm": 0.07253885269165039,
"learning_rate": 0.00015220592969928315,
"loss": 0.6898,
"step": 952
},
{
"epoch": 0.98,
"grad_norm": 0.08555888384580612,
"learning_rate": 0.0001521134070202046,
"loss": 0.6885,
"step": 953
},
{
"epoch": 0.98,
"grad_norm": 0.09179149568080902,
"learning_rate": 0.00015202082305640984,
"loss": 0.6164,
"step": 954
},
{
"epoch": 0.98,
"grad_norm": 0.07192881405353546,
"learning_rate": 0.00015192817791677646,
"loss": 0.6862,
"step": 955
},
{
"epoch": 0.99,
"grad_norm": 0.07520081102848053,
"learning_rate": 0.00015183547171025399,
"loss": 0.6878,
"step": 956
},
{
"epoch": 0.99,
"grad_norm": 0.07251414656639099,
"learning_rate": 0.00015174270454586375,
"loss": 0.7239,
"step": 957
},
{
"epoch": 0.99,
"grad_norm": 0.0739588811993599,
"learning_rate": 0.00015164987653269876,
"loss": 0.7359,
"step": 958
},
{
"epoch": 0.99,
"grad_norm": 0.07903960347175598,
"learning_rate": 0.00015155698777992365,
"loss": 0.6744,
"step": 959
},
{
"epoch": 0.99,
"grad_norm": 0.07801897823810577,
"learning_rate": 0.0001514640383967744,
"loss": 0.7226,
"step": 960
},
{
"epoch": 0.99,
"grad_norm": 0.0692887008190155,
"learning_rate": 0.0001513710284925583,
"loss": 0.7153,
"step": 961
},
{
"epoch": 0.99,
"grad_norm": 0.07592754065990448,
"learning_rate": 0.0001512779581766539,
"loss": 0.7229,
"step": 962
},
{
"epoch": 0.99,
"grad_norm": 0.07932160049676895,
"learning_rate": 0.00015118482755851069,
"loss": 0.6589,
"step": 963
},
{
"epoch": 0.99,
"grad_norm": 0.07948953658342361,
"learning_rate": 0.0001510916367476491,
"loss": 0.7122,
"step": 964
},
{
"epoch": 1.0,
"grad_norm": 0.07656390219926834,
"learning_rate": 0.00015099838585366042,
"loss": 0.7,
"step": 965
},
{
"epoch": 1.0,
"grad_norm": 0.07349363714456558,
"learning_rate": 0.00015090507498620649,
"loss": 0.688,
"step": 966
},
{
"epoch": 1.0,
"grad_norm": 0.07416553050279617,
"learning_rate": 0.0001508117042550197,
"loss": 0.6698,
"step": 967
},
{
"epoch": 1.0,
"grad_norm": 0.07723015546798706,
"learning_rate": 0.0001507182737699029,
"loss": 0.7014,
"step": 968
},
{
"epoch": 1.0,
"grad_norm": 0.07453907281160355,
"learning_rate": 0.0001506247836407292,
"loss": 0.6841,
"step": 969
}
],
"logging_steps": 1,
"max_steps": 2907,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 969,
"total_flos": 2.8915365125900206e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}