{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 107,
"global_step": 426,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.7947345972061157,
"learning_rate": 1e-05,
"loss": 1.4577,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 1.346970796585083,
"eval_runtime": 5.5779,
"eval_samples_per_second": 17.928,
"eval_steps_per_second": 17.928,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 0.780886709690094,
"learning_rate": 2e-05,
"loss": 1.6627,
"step": 2
},
{
"epoch": 0.01,
"grad_norm": 0.8595380187034607,
"learning_rate": 3e-05,
"loss": 1.2373,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 1.386765956878662,
"learning_rate": 4e-05,
"loss": 1.4095,
"step": 4
},
{
"epoch": 0.01,
"grad_norm": 0.8326601386070251,
"learning_rate": 5e-05,
"loss": 1.385,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 1.0141901969909668,
"learning_rate": 6e-05,
"loss": 1.311,
"step": 6
},
{
"epoch": 0.02,
"grad_norm": 0.9190280437469482,
"learning_rate": 7e-05,
"loss": 1.394,
"step": 7
},
{
"epoch": 0.02,
"grad_norm": 1.0408940315246582,
"learning_rate": 8e-05,
"loss": 1.1983,
"step": 8
},
{
"epoch": 0.02,
"grad_norm": 1.217496633529663,
"learning_rate": 9e-05,
"loss": 1.4083,
"step": 9
},
{
"epoch": 0.02,
"grad_norm": 1.0259888172149658,
"learning_rate": 0.0001,
"loss": 1.527,
"step": 10
},
{
"epoch": 0.03,
"grad_norm": 0.8085443377494812,
"learning_rate": 0.00011000000000000002,
"loss": 1.1892,
"step": 11
},
{
"epoch": 0.03,
"grad_norm": 1.468948245048523,
"learning_rate": 0.00012,
"loss": 1.6414,
"step": 12
},
{
"epoch": 0.03,
"grad_norm": 1.1099722385406494,
"learning_rate": 0.00013000000000000002,
"loss": 1.8412,
"step": 13
},
{
"epoch": 0.03,
"grad_norm": 1.6442979574203491,
"learning_rate": 0.00014,
"loss": 1.3497,
"step": 14
},
{
"epoch": 0.04,
"grad_norm": 0.8905379176139832,
"learning_rate": 0.00015000000000000001,
"loss": 0.7155,
"step": 15
},
{
"epoch": 0.04,
"grad_norm": 1.1754573583602905,
"learning_rate": 0.00016,
"loss": 2.5543,
"step": 16
},
{
"epoch": 0.04,
"grad_norm": 1.4055070877075195,
"learning_rate": 0.00017,
"loss": 1.3637,
"step": 17
},
{
"epoch": 0.04,
"grad_norm": 1.1854274272918701,
"learning_rate": 0.00018,
"loss": 0.9771,
"step": 18
},
{
"epoch": 0.04,
"grad_norm": 1.0462994575500488,
"learning_rate": 0.00019,
"loss": 1.3884,
"step": 19
},
{
"epoch": 0.05,
"grad_norm": 1.3554202318191528,
"learning_rate": 0.0002,
"loss": 1.3547,
"step": 20
},
{
"epoch": 0.05,
"grad_norm": 1.1008217334747314,
"learning_rate": 0.00019999700625010443,
"loss": 0.7969,
"step": 21
},
{
"epoch": 0.05,
"grad_norm": 1.003470540046692,
"learning_rate": 0.0001999880251796685,
"loss": 1.3606,
"step": 22
},
{
"epoch": 0.05,
"grad_norm": 0.8562943935394287,
"learning_rate": 0.00019997305732643374,
"loss": 1.1796,
"step": 23
},
{
"epoch": 0.06,
"grad_norm": 1.0724220275878906,
"learning_rate": 0.00019995210358660038,
"loss": 1.2886,
"step": 24
},
{
"epoch": 0.06,
"grad_norm": 1.0882127285003662,
"learning_rate": 0.00019992516521477352,
"loss": 1.2085,
"step": 25
},
{
"epoch": 0.06,
"grad_norm": 0.8728991150856018,
"learning_rate": 0.00019989224382388813,
"loss": 1.2168,
"step": 26
},
{
"epoch": 0.06,
"grad_norm": 1.0858105421066284,
"learning_rate": 0.00019985334138511237,
"loss": 1.1404,
"step": 27
},
{
"epoch": 0.07,
"grad_norm": 1.2043813467025757,
"learning_rate": 0.00019980846022772978,
"loss": 1.4167,
"step": 28
},
{
"epoch": 0.07,
"grad_norm": 0.8915519714355469,
"learning_rate": 0.00019975760303899952,
"loss": 1.3995,
"step": 29
},
{
"epoch": 0.07,
"grad_norm": 0.8792808055877686,
"learning_rate": 0.0001997007728639956,
"loss": 1.2375,
"step": 30
},
{
"epoch": 0.07,
"grad_norm": 0.8958542346954346,
"learning_rate": 0.0001996379731054247,
"loss": 1.0913,
"step": 31
},
{
"epoch": 0.08,
"grad_norm": 1.0441163778305054,
"learning_rate": 0.00019956920752342225,
"loss": 1.2435,
"step": 32
},
{
"epoch": 0.08,
"grad_norm": 1.2748677730560303,
"learning_rate": 0.00019949448023532726,
"loss": 1.5485,
"step": 33
},
{
"epoch": 0.08,
"grad_norm": 1.4271036386489868,
"learning_rate": 0.00019941379571543596,
"loss": 1.2604,
"step": 34
},
{
"epoch": 0.08,
"grad_norm": 1.1877238750457764,
"learning_rate": 0.00019932715879473386,
"loss": 1.391,
"step": 35
},
{
"epoch": 0.08,
"grad_norm": 0.8864910006523132,
"learning_rate": 0.00019923457466060636,
"loss": 0.8182,
"step": 36
},
{
"epoch": 0.09,
"grad_norm": 0.9275113344192505,
"learning_rate": 0.00019913604885652832,
"loss": 1.1828,
"step": 37
},
{
"epoch": 0.09,
"grad_norm": 0.8808721899986267,
"learning_rate": 0.00019903158728173205,
"loss": 0.5187,
"step": 38
},
{
"epoch": 0.09,
"grad_norm": 1.3636075258255005,
"learning_rate": 0.00019892119619085413,
"loss": 1.2401,
"step": 39
},
{
"epoch": 0.09,
"grad_norm": 0.9511018991470337,
"learning_rate": 0.00019880488219356087,
"loss": 1.3375,
"step": 40
},
{
"epoch": 0.1,
"grad_norm": 0.8974116444587708,
"learning_rate": 0.00019868265225415265,
"loss": 1.0299,
"step": 41
},
{
"epoch": 0.1,
"grad_norm": 1.2469041347503662,
"learning_rate": 0.00019855451369114676,
"loss": 1.3814,
"step": 42
},
{
"epoch": 0.1,
"grad_norm": 0.8123692870140076,
"learning_rate": 0.0001984204741768395,
"loss": 1.1434,
"step": 43
},
{
"epoch": 0.1,
"grad_norm": 0.8598824143409729,
"learning_rate": 0.00019828054173684644,
"loss": 0.9911,
"step": 44
},
{
"epoch": 0.11,
"grad_norm": 1.1425665616989136,
"learning_rate": 0.00019813472474962217,
"loss": 1.4346,
"step": 45
},
{
"epoch": 0.11,
"grad_norm": 0.8056216835975647,
"learning_rate": 0.00019798303194595846,
"loss": 1.221,
"step": 46
},
{
"epoch": 0.11,
"grad_norm": 1.738286018371582,
"learning_rate": 0.00019782547240846166,
"loss": 1.5189,
"step": 47
},
{
"epoch": 0.11,
"grad_norm": 1.21600341796875,
"learning_rate": 0.00019766205557100868,
"loss": 1.0516,
"step": 48
},
{
"epoch": 0.12,
"grad_norm": 0.9683429598808289,
"learning_rate": 0.00019749279121818235,
"loss": 1.2459,
"step": 49
},
{
"epoch": 0.12,
"grad_norm": 1.1180105209350586,
"learning_rate": 0.00019731768948468549,
"loss": 1.5256,
"step": 50
},
{
"epoch": 0.12,
"grad_norm": 1.0404270887374878,
"learning_rate": 0.00019713676085473397,
"loss": 1.125,
"step": 51
},
{
"epoch": 0.12,
"grad_norm": 1.1551333665847778,
"learning_rate": 0.00019695001616142915,
"loss": 1.4839,
"step": 52
},
{
"epoch": 0.12,
"grad_norm": 1.0433716773986816,
"learning_rate": 0.00019675746658610917,
"loss": 1.3104,
"step": 53
},
{
"epoch": 0.13,
"grad_norm": 1.487308144569397,
"learning_rate": 0.0001965591236576794,
"loss": 1.1968,
"step": 54
},
{
"epoch": 0.13,
"grad_norm": 0.8388357758522034,
"learning_rate": 0.0001963549992519223,
"loss": 1.3537,
"step": 55
},
{
"epoch": 0.13,
"grad_norm": 1.3177778720855713,
"learning_rate": 0.00019614510559078625,
"loss": 1.3413,
"step": 56
},
{
"epoch": 0.13,
"grad_norm": 1.2819414138793945,
"learning_rate": 0.00019592945524165374,
"loss": 1.0727,
"step": 57
},
{
"epoch": 0.14,
"grad_norm": 1.0701121091842651,
"learning_rate": 0.00019570806111658898,
"loss": 1.5754,
"step": 58
},
{
"epoch": 0.14,
"grad_norm": 0.8484037518501282,
"learning_rate": 0.0001954809364715648,
"loss": 1.2085,
"step": 59
},
{
"epoch": 0.14,
"grad_norm": 1.3628385066986084,
"learning_rate": 0.00019524809490566877,
"loss": 1.2707,
"step": 60
},
{
"epoch": 0.14,
"grad_norm": 0.9246305823326111,
"learning_rate": 0.00019500955036028922,
"loss": 1.1952,
"step": 61
},
{
"epoch": 0.15,
"grad_norm": 0.927686870098114,
"learning_rate": 0.00019476531711828027,
"loss": 1.1894,
"step": 62
},
{
"epoch": 0.15,
"grad_norm": 0.9189894795417786,
"learning_rate": 0.00019451540980310676,
"loss": 1.0086,
"step": 63
},
{
"epoch": 0.15,
"grad_norm": 0.8920311331748962,
"learning_rate": 0.0001942598433779687,
"loss": 1.355,
"step": 64
},
{
"epoch": 0.15,
"grad_norm": 1.1602280139923096,
"learning_rate": 0.00019399863314490526,
"loss": 1.1758,
"step": 65
},
{
"epoch": 0.15,
"grad_norm": 0.9801963567733765,
"learning_rate": 0.00019373179474387858,
"loss": 1.211,
"step": 66
},
{
"epoch": 0.16,
"grad_norm": 0.9547926187515259,
"learning_rate": 0.0001934593441518374,
"loss": 1.0282,
"step": 67
},
{
"epoch": 0.16,
"grad_norm": 0.9999262690544128,
"learning_rate": 0.00019318129768176032,
"loss": 1.1899,
"step": 68
},
{
"epoch": 0.16,
"grad_norm": 0.9414704442024231,
"learning_rate": 0.00019289767198167916,
"loss": 1.1515,
"step": 69
},
{
"epoch": 0.16,
"grad_norm": 0.9921344518661499,
"learning_rate": 0.0001926084840336821,
"loss": 1.0423,
"step": 70
},
{
"epoch": 0.17,
"grad_norm": 0.8750163912773132,
"learning_rate": 0.00019231375115289696,
"loss": 0.6788,
"step": 71
},
{
"epoch": 0.17,
"grad_norm": 1.3444360494613647,
"learning_rate": 0.00019201349098645434,
"loss": 1.241,
"step": 72
},
{
"epoch": 0.17,
"grad_norm": 0.6930075883865356,
"learning_rate": 0.00019170772151243107,
"loss": 1.0857,
"step": 73
},
{
"epoch": 0.17,
"grad_norm": 0.9557262659072876,
"learning_rate": 0.0001913964610387738,
"loss": 1.4148,
"step": 74
},
{
"epoch": 0.18,
"grad_norm": 0.9436715245246887,
"learning_rate": 0.00019107972820220267,
"loss": 1.1334,
"step": 75
},
{
"epoch": 0.18,
"grad_norm": 1.321735143661499,
"learning_rate": 0.00019075754196709572,
"loss": 1.4699,
"step": 76
},
{
"epoch": 0.18,
"grad_norm": 0.824515163898468,
"learning_rate": 0.00019042992162435302,
"loss": 0.9975,
"step": 77
},
{
"epoch": 0.18,
"grad_norm": 1.0362204313278198,
"learning_rate": 0.0001900968867902419,
"loss": 1.146,
"step": 78
},
{
"epoch": 0.19,
"grad_norm": 0.9640101194381714,
"learning_rate": 0.00018975845740522244,
"loss": 1.3615,
"step": 79
},
{
"epoch": 0.19,
"grad_norm": 0.9828828573226929,
"learning_rate": 0.0001894146537327533,
"loss": 1.0824,
"step": 80
},
{
"epoch": 0.19,
"grad_norm": 1.0409202575683594,
"learning_rate": 0.00018906549635807861,
"loss": 1.4383,
"step": 81
},
{
"epoch": 0.19,
"grad_norm": 1.0056612491607666,
"learning_rate": 0.00018871100618699554,
"loss": 1.4139,
"step": 82
},
{
"epoch": 0.19,
"grad_norm": 0.9796703457832336,
"learning_rate": 0.0001883512044446023,
"loss": 1.6273,
"step": 83
},
{
"epoch": 0.2,
"grad_norm": 0.887485682964325,
"learning_rate": 0.00018798611267402746,
"loss": 1.3315,
"step": 84
},
{
"epoch": 0.2,
"grad_norm": 0.959770679473877,
"learning_rate": 0.00018761575273514003,
"loss": 1.3016,
"step": 85
},
{
"epoch": 0.2,
"grad_norm": 0.9511293172836304,
"learning_rate": 0.00018724014680324057,
"loss": 1.2996,
"step": 86
},
{
"epoch": 0.2,
"grad_norm": 0.9222784638404846,
"learning_rate": 0.0001868593173677335,
"loss": 1.1622,
"step": 87
},
{
"epoch": 0.21,
"grad_norm": 0.7448104619979858,
"learning_rate": 0.00018647328723078038,
"loss": 0.8966,
"step": 88
},
{
"epoch": 0.21,
"grad_norm": 1.2796077728271484,
"learning_rate": 0.000186082079505935,
"loss": 1.0091,
"step": 89
},
{
"epoch": 0.21,
"grad_norm": 0.784875750541687,
"learning_rate": 0.00018568571761675893,
"loss": 1.0961,
"step": 90
},
{
"epoch": 0.21,
"grad_norm": 0.8300296068191528,
"learning_rate": 0.00018528422529541952,
"loss": 0.7698,
"step": 91
},
{
"epoch": 0.22,
"grad_norm": 0.8383649587631226,
"learning_rate": 0.0001848776265812687,
"loss": 1.0525,
"step": 92
},
{
"epoch": 0.22,
"grad_norm": 0.9814484119415283,
"learning_rate": 0.0001844659458194036,
"loss": 1.2679,
"step": 93
},
{
"epoch": 0.22,
"grad_norm": 0.8606963753700256,
"learning_rate": 0.00018404920765920896,
"loss": 1.4763,
"step": 94
},
{
"epoch": 0.22,
"grad_norm": 1.2019658088684082,
"learning_rate": 0.00018362743705288125,
"loss": 1.376,
"step": 95
},
{
"epoch": 0.23,
"grad_norm": 0.8579021692276001,
"learning_rate": 0.00018320065925393468,
"loss": 1.3184,
"step": 96
},
{
"epoch": 0.23,
"grad_norm": 1.0116324424743652,
"learning_rate": 0.00018276889981568906,
"loss": 0.939,
"step": 97
},
{
"epoch": 0.23,
"grad_norm": 0.9020506143569946,
"learning_rate": 0.00018233218458973984,
"loss": 1.3675,
"step": 98
},
{
"epoch": 0.23,
"grad_norm": 0.7909868955612183,
"learning_rate": 0.00018189053972441025,
"loss": 1.0777,
"step": 99
},
{
"epoch": 0.23,
"grad_norm": 0.9654402136802673,
"learning_rate": 0.00018144399166318572,
"loss": 1.2697,
"step": 100
},
{
"epoch": 0.24,
"grad_norm": 0.9301828742027283,
"learning_rate": 0.0001809925671431304,
"loss": 1.3651,
"step": 101
},
{
"epoch": 0.24,
"grad_norm": 0.8000981211662292,
"learning_rate": 0.00018053629319328662,
"loss": 0.9806,
"step": 102
},
{
"epoch": 0.24,
"grad_norm": 0.7500132918357849,
"learning_rate": 0.00018007519713305605,
"loss": 1.0699,
"step": 103
},
{
"epoch": 0.24,
"grad_norm": 1.0019265413284302,
"learning_rate": 0.00017960930657056438,
"loss": 1.2713,
"step": 104
},
{
"epoch": 0.25,
"grad_norm": 0.8942422270774841,
"learning_rate": 0.00017913864940100808,
"loss": 0.9149,
"step": 105
},
{
"epoch": 0.25,
"grad_norm": 0.7721758484840393,
"learning_rate": 0.00017866325380498416,
"loss": 1.0378,
"step": 106
},
{
"epoch": 0.25,
"grad_norm": 0.9199188947677612,
"learning_rate": 0.000178183148246803,
"loss": 1.3192,
"step": 107
},
{
"epoch": 0.25,
"eval_loss": 1.1375699043273926,
"eval_runtime": 5.9115,
"eval_samples_per_second": 16.916,
"eval_steps_per_second": 16.916,
"step": 107
},
{
"epoch": 0.25,
"grad_norm": 0.7156293988227844,
"learning_rate": 0.0001776983614727838,
"loss": 0.854,
"step": 108
},
{
"epoch": 0.26,
"grad_norm": 1.0115437507629395,
"learning_rate": 0.00017720892250953373,
"loss": 0.9842,
"step": 109
},
{
"epoch": 0.26,
"grad_norm": 1.112038493156433,
"learning_rate": 0.00017671486066220965,
"loss": 1.0456,
"step": 110
},
{
"epoch": 0.26,
"grad_norm": 0.9239675402641296,
"learning_rate": 0.00017621620551276366,
"loss": 1.1104,
"step": 111
},
{
"epoch": 0.26,
"grad_norm": 0.8196420073509216,
"learning_rate": 0.00017571298691817177,
"loss": 0.7432,
"step": 112
},
{
"epoch": 0.27,
"grad_norm": 0.7901629209518433,
"learning_rate": 0.00017520523500864625,
"loss": 0.8345,
"step": 113
},
{
"epoch": 0.27,
"grad_norm": 1.0044571161270142,
"learning_rate": 0.0001746929801858317,
"loss": 1.1328,
"step": 114
},
{
"epoch": 0.27,
"grad_norm": 1.10731041431427,
"learning_rate": 0.00017417625312098452,
"loss": 1.1772,
"step": 115
},
{
"epoch": 0.27,
"grad_norm": 1.0169037580490112,
"learning_rate": 0.0001736550847531366,
"loss": 1.0579,
"step": 116
},
{
"epoch": 0.27,
"grad_norm": 0.8641659021377563,
"learning_rate": 0.00017312950628724295,
"loss": 0.8655,
"step": 117
},
{
"epoch": 0.28,
"grad_norm": 1.0836398601531982,
"learning_rate": 0.0001725995491923131,
"loss": 1.1701,
"step": 118
},
{
"epoch": 0.28,
"grad_norm": 1.0795177221298218,
"learning_rate": 0.00017206524519952697,
"loss": 1.3704,
"step": 119
},
{
"epoch": 0.28,
"grad_norm": 0.7752137184143066,
"learning_rate": 0.00017152662630033505,
"loss": 0.9499,
"step": 120
},
{
"epoch": 0.28,
"grad_norm": 0.8980644345283508,
"learning_rate": 0.00017098372474454277,
"loss": 0.915,
"step": 121
},
{
"epoch": 0.29,
"grad_norm": 0.7363729476928711,
"learning_rate": 0.00017043657303837963,
"loss": 0.9518,
"step": 122
},
{
"epoch": 0.29,
"grad_norm": 0.9289439916610718,
"learning_rate": 0.000169885203942553,
"loss": 1.1618,
"step": 123
},
{
"epoch": 0.29,
"grad_norm": 0.785468578338623,
"learning_rate": 0.0001693296504702862,
"loss": 1.1148,
"step": 124
},
{
"epoch": 0.29,
"grad_norm": 0.9278959035873413,
"learning_rate": 0.00016876994588534234,
"loss": 1.1782,
"step": 125
},
{
"epoch": 0.3,
"grad_norm": 0.8510153889656067,
"learning_rate": 0.00016820612370003221,
"loss": 1.1296,
"step": 126
},
{
"epoch": 0.3,
"grad_norm": 0.9500170350074768,
"learning_rate": 0.000167638217673208,
"loss": 0.8094,
"step": 127
},
{
"epoch": 0.3,
"grad_norm": 0.8451842665672302,
"learning_rate": 0.00016706626180824186,
"loss": 1.0722,
"step": 128
},
{
"epoch": 0.3,
"grad_norm": 0.6593143939971924,
"learning_rate": 0.00016649029035099,
"loss": 0.5532,
"step": 129
},
{
"epoch": 0.31,
"grad_norm": 1.0398962497711182,
"learning_rate": 0.0001659103377877423,
"loss": 0.8373,
"step": 130
},
{
"epoch": 0.31,
"grad_norm": 0.8942617774009705,
"learning_rate": 0.0001653264388431572,
"loss": 1.0992,
"step": 131
},
{
"epoch": 0.31,
"grad_norm": 0.7003504633903503,
"learning_rate": 0.00016473862847818277,
"loss": 0.699,
"step": 132
},
{
"epoch": 0.31,
"grad_norm": 0.7734900116920471,
"learning_rate": 0.00016414694188796345,
"loss": 0.9312,
"step": 133
},
{
"epoch": 0.31,
"grad_norm": 1.0958092212677002,
"learning_rate": 0.00016355141449973256,
"loss": 1.1076,
"step": 134
},
{
"epoch": 0.32,
"grad_norm": 0.8197342157363892,
"learning_rate": 0.0001629520819706912,
"loss": 1.1782,
"step": 135
},
{
"epoch": 0.32,
"grad_norm": 0.7238759994506836,
"learning_rate": 0.00016234898018587337,
"loss": 1.106,
"step": 136
},
{
"epoch": 0.32,
"grad_norm": 0.9790141582489014,
"learning_rate": 0.0001617421452559971,
"loss": 1.0013,
"step": 137
},
{
"epoch": 0.32,
"grad_norm": 0.8993759155273438,
"learning_rate": 0.0001611316135153026,
"loss": 1.0726,
"step": 138
},
{
"epoch": 0.33,
"grad_norm": 0.7299768328666687,
"learning_rate": 0.00016051742151937655,
"loss": 0.9232,
"step": 139
},
{
"epoch": 0.33,
"grad_norm": 1.0451937913894653,
"learning_rate": 0.0001598996060429634,
"loss": 1.2411,
"step": 140
},
{
"epoch": 0.33,
"grad_norm": 0.9933896064758301,
"learning_rate": 0.00015927820407776353,
"loss": 0.6061,
"step": 141
},
{
"epoch": 0.33,
"grad_norm": 0.6830273866653442,
"learning_rate": 0.0001586532528302183,
"loss": 0.9581,
"step": 142
},
{
"epoch": 0.34,
"grad_norm": 0.7997300624847412,
"learning_rate": 0.00015802478971928242,
"loss": 0.7054,
"step": 143
},
{
"epoch": 0.34,
"grad_norm": 0.8350067138671875,
"learning_rate": 0.0001573928523741832,
"loss": 1.1718,
"step": 144
},
{
"epoch": 0.34,
"grad_norm": 0.9091382026672363,
"learning_rate": 0.00015675747863216801,
"loss": 1.0653,
"step": 145
},
{
"epoch": 0.34,
"grad_norm": 0.9306054711341858,
"learning_rate": 0.00015611870653623825,
"loss": 1.3816,
"step": 146
},
{
"epoch": 0.35,
"grad_norm": 1.1074793338775635,
"learning_rate": 0.00015547657433287183,
"loss": 1.4123,
"step": 147
},
{
"epoch": 0.35,
"grad_norm": 0.8237017393112183,
"learning_rate": 0.0001548311204697331,
"loss": 1.1059,
"step": 148
},
{
"epoch": 0.35,
"grad_norm": 1.4395543336868286,
"learning_rate": 0.00015418238359337077,
"loss": 1.2508,
"step": 149
},
{
"epoch": 0.35,
"grad_norm": 1.2550331354141235,
"learning_rate": 0.00015353040254690393,
"loss": 1.8478,
"step": 150
},
{
"epoch": 0.35,
"grad_norm": 0.7537391781806946,
"learning_rate": 0.0001528752163676964,
"loss": 0.8779,
"step": 151
},
{
"epoch": 0.36,
"grad_norm": 1.1749041080474854,
"learning_rate": 0.00015221686428501928,
"loss": 1.1649,
"step": 152
},
{
"epoch": 0.36,
"grad_norm": 0.9532057046890259,
"learning_rate": 0.00015155538571770218,
"loss": 1.1493,
"step": 153
},
{
"epoch": 0.36,
"grad_norm": 1.0025173425674438,
"learning_rate": 0.0001508908202717729,
"loss": 1.2429,
"step": 154
},
{
"epoch": 0.36,
"grad_norm": 0.9108004570007324,
"learning_rate": 0.00015022320773808612,
"loss": 1.5981,
"step": 155
},
{
"epoch": 0.37,
"grad_norm": 0.9072840809822083,
"learning_rate": 0.00014955258808994096,
"loss": 1.2374,
"step": 156
},
{
"epoch": 0.37,
"grad_norm": 0.8806712031364441,
"learning_rate": 0.00014887900148068735,
"loss": 1.1942,
"step": 157
},
{
"epoch": 0.37,
"grad_norm": 1.1253175735473633,
"learning_rate": 0.0001482024882413222,
"loss": 1.1235,
"step": 158
},
{
"epoch": 0.37,
"grad_norm": 0.8281373977661133,
"learning_rate": 0.00014752308887807427,
"loss": 1.1125,
"step": 159
},
{
"epoch": 0.38,
"grad_norm": 0.9860203862190247,
"learning_rate": 0.00014684084406997903,
"loss": 1.12,
"step": 160
},
{
"epoch": 0.38,
"grad_norm": 0.9580567479133606,
"learning_rate": 0.00014615579466644292,
"loss": 1.1726,
"step": 161
},
{
"epoch": 0.38,
"grad_norm": 0.704282820224762,
"learning_rate": 0.00014546798168479756,
"loss": 1.1812,
"step": 162
},
{
"epoch": 0.38,
"grad_norm": 0.8092489838600159,
"learning_rate": 0.00014477744630784378,
"loss": 0.9743,
"step": 163
},
{
"epoch": 0.38,
"grad_norm": 0.875648021697998,
"learning_rate": 0.00014408422988138584,
"loss": 1.019,
"step": 164
},
{
"epoch": 0.39,
"grad_norm": 0.8234091401100159,
"learning_rate": 0.00014338837391175582,
"loss": 1.2794,
"step": 165
},
{
"epoch": 0.39,
"grad_norm": 0.8568207025527954,
"learning_rate": 0.00014268992006332846,
"loss": 1.1104,
"step": 166
},
{
"epoch": 0.39,
"grad_norm": 0.9901588559150696,
"learning_rate": 0.00014198891015602646,
"loss": 1.1187,
"step": 167
},
{
"epoch": 0.39,
"grad_norm": 1.0761953592300415,
"learning_rate": 0.0001412853861628166,
"loss": 1.5204,
"step": 168
},
{
"epoch": 0.4,
"grad_norm": 0.8662331104278564,
"learning_rate": 0.0001405793902071964,
"loss": 1.2189,
"step": 169
},
{
"epoch": 0.4,
"grad_norm": 0.8049296736717224,
"learning_rate": 0.00013987096456067236,
"loss": 1.3537,
"step": 170
},
{
"epoch": 0.4,
"grad_norm": 0.8470759391784668,
"learning_rate": 0.00013916015164022852,
"loss": 1.0526,
"step": 171
},
{
"epoch": 0.4,
"grad_norm": 0.9302281737327576,
"learning_rate": 0.00013844699400578696,
"loss": 0.9932,
"step": 172
},
{
"epoch": 0.41,
"grad_norm": 1.1691091060638428,
"learning_rate": 0.00013773153435765964,
"loss": 1.1426,
"step": 173
},
{
"epoch": 0.41,
"grad_norm": 0.9937382340431213,
"learning_rate": 0.00013701381553399145,
"loss": 1.326,
"step": 174
},
{
"epoch": 0.41,
"grad_norm": 0.8379032015800476,
"learning_rate": 0.00013629388050819547,
"loss": 1.0955,
"step": 175
},
{
"epoch": 0.41,
"grad_norm": 0.9588186144828796,
"learning_rate": 0.00013557177238637986,
"loss": 1.2328,
"step": 176
},
{
"epoch": 0.42,
"grad_norm": 0.7939326167106628,
"learning_rate": 0.00013484753440476692,
"loss": 1.1649,
"step": 177
},
{
"epoch": 0.42,
"grad_norm": 0.8485954403877258,
"learning_rate": 0.00013412120992710425,
"loss": 1.034,
"step": 178
},
{
"epoch": 0.42,
"grad_norm": 0.923659086227417,
"learning_rate": 0.00013339284244206847,
"loss": 1.245,
"step": 179
},
{
"epoch": 0.42,
"grad_norm": 0.9998313784599304,
"learning_rate": 0.00013266247556066122,
"loss": 1.4282,
"step": 180
},
{
"epoch": 0.42,
"grad_norm": 1.1997437477111816,
"learning_rate": 0.000131930153013598,
"loss": 1.3608,
"step": 181
},
{
"epoch": 0.43,
"grad_norm": 1.101218581199646,
"learning_rate": 0.0001311959186486898,
"loss": 1.3061,
"step": 182
},
{
"epoch": 0.43,
"grad_norm": 1.1238192319869995,
"learning_rate": 0.0001304598164282176,
"loss": 1.1255,
"step": 183
},
{
"epoch": 0.43,
"grad_norm": 1.3340548276901245,
"learning_rate": 0.00012972189042630044,
"loss": 1.5872,
"step": 184
},
{
"epoch": 0.43,
"grad_norm": 0.8796036839485168,
"learning_rate": 0.00012898218482625606,
"loss": 1.1176,
"step": 185
},
{
"epoch": 0.44,
"grad_norm": 0.9776093363761902,
"learning_rate": 0.0001282407439179557,
"loss": 1.1615,
"step": 186
},
{
"epoch": 0.44,
"grad_norm": 1.3886358737945557,
"learning_rate": 0.0001274976120951723,
"loss": 1.0288,
"step": 187
},
{
"epoch": 0.44,
"grad_norm": 0.9199085235595703,
"learning_rate": 0.00012675283385292212,
"loss": 0.9302,
"step": 188
},
{
"epoch": 0.44,
"grad_norm": 1.1090227365493774,
"learning_rate": 0.00012600645378480082,
"loss": 1.214,
"step": 189
},
{
"epoch": 0.45,
"grad_norm": 0.8784577250480652,
"learning_rate": 0.00012525851658031352,
"loss": 0.8802,
"step": 190
},
{
"epoch": 0.45,
"grad_norm": 0.860397219657898,
"learning_rate": 0.0001245090670221987,
"loss": 1.0587,
"step": 191
},
{
"epoch": 0.45,
"grad_norm": 1.014009952545166,
"learning_rate": 0.00012375814998374712,
"loss": 1.2103,
"step": 192
},
{
"epoch": 0.45,
"grad_norm": 0.6379606127738953,
"learning_rate": 0.00012300581042611492,
"loss": 0.4779,
"step": 193
},
{
"epoch": 0.46,
"grad_norm": 0.8548885583877563,
"learning_rate": 0.00012225209339563145,
"loss": 1.2684,
"step": 194
},
{
"epoch": 0.46,
"grad_norm": 0.5329421162605286,
"learning_rate": 0.00012149704402110243,
"loss": 0.5498,
"step": 195
},
{
"epoch": 0.46,
"grad_norm": 0.8345950245857239,
"learning_rate": 0.00012074070751110751,
"loss": 1.2684,
"step": 196
},
{
"epoch": 0.46,
"grad_norm": 0.8862521052360535,
"learning_rate": 0.00011998312915129371,
"loss": 1.1864,
"step": 197
},
{
"epoch": 0.46,
"grad_norm": 0.9100441336631775,
"learning_rate": 0.0001192243543016637,
"loss": 1.2897,
"step": 198
},
{
"epoch": 0.47,
"grad_norm": 1.1576776504516602,
"learning_rate": 0.00011846442839386003,
"loss": 1.3155,
"step": 199
},
{
"epoch": 0.47,
"grad_norm": 1.105965495109558,
"learning_rate": 0.00011770339692844483,
"loss": 0.9913,
"step": 200
},
{
"epoch": 0.47,
"grad_norm": 0.696582019329071,
"learning_rate": 0.00011694130547217554,
"loss": 0.9719,
"step": 201
},
{
"epoch": 0.47,
"grad_norm": 0.9100035429000854,
"learning_rate": 0.0001161781996552765,
"loss": 1.0476,
"step": 202
},
{
"epoch": 0.48,
"grad_norm": 0.9169908165931702,
"learning_rate": 0.00011541412516870684,
"loss": 0.909,
"step": 203
},
{
"epoch": 0.48,
"grad_norm": 0.8421806693077087,
"learning_rate": 0.00011464912776142494,
"loss": 1.1626,
"step": 204
},
{
"epoch": 0.48,
"grad_norm": 0.8510706424713135,
"learning_rate": 0.00011388325323764888,
"loss": 1.4659,
"step": 205
},
{
"epoch": 0.48,
"grad_norm": 1.1053885221481323,
"learning_rate": 0.00011311654745411425,
"loss": 1.4878,
"step": 206
},
{
"epoch": 0.49,
"grad_norm": 0.933678388595581,
"learning_rate": 0.00011234905631732819,
"loss": 0.9186,
"step": 207
},
{
"epoch": 0.49,
"grad_norm": 0.8687578439712524,
"learning_rate": 0.00011158082578082089,
"loss": 0.7517,
"step": 208
},
{
"epoch": 0.49,
"grad_norm": 0.8849249482154846,
"learning_rate": 0.00011081190184239419,
"loss": 1.0061,
"step": 209
},
{
"epoch": 0.49,
"grad_norm": 0.8970105648040771,
"learning_rate": 0.00011004233054136725,
"loss": 1.0486,
"step": 210
},
{
"epoch": 0.5,
"grad_norm": 1.0767931938171387,
"learning_rate": 0.00010927215795582012,
"loss": 1.2699,
"step": 211
},
{
"epoch": 0.5,
"grad_norm": 0.9513278007507324,
"learning_rate": 0.00010850143019983474,
"loss": 1.1946,
"step": 212
},
{
"epoch": 0.5,
"grad_norm": 1.0254892110824585,
"learning_rate": 0.0001077301934207339,
"loss": 1.4022,
"step": 213
},
{
"epoch": 0.5,
"grad_norm": 0.7836260199546814,
"learning_rate": 0.00010695849379631813,
"loss": 1.3095,
"step": 214
},
{
"epoch": 0.5,
"eval_loss": 1.1224589347839355,
"eval_runtime": 5.9458,
"eval_samples_per_second": 16.819,
"eval_steps_per_second": 16.819,
"step": 214
},
{
"epoch": 0.5,
"grad_norm": 0.9416055679321289,
"learning_rate": 0.00010618637753210085,
"loss": 1.0996,
"step": 215
},
{
"epoch": 0.51,
"grad_norm": 0.8331229090690613,
"learning_rate": 0.00010541389085854176,
"loss": 0.9657,
"step": 216
},
{
"epoch": 0.51,
"grad_norm": 0.7178126573562622,
"learning_rate": 0.00010464108002827882,
"loss": 0.7778,
"step": 217
},
{
"epoch": 0.51,
"grad_norm": 1.8502916097640991,
"learning_rate": 0.00010386799131335889,
"loss": 1.4472,
"step": 218
},
{
"epoch": 0.51,
"grad_norm": 0.8758464455604553,
"learning_rate": 0.00010309467100246713,
"loss": 1.1188,
"step": 219
},
{
"epoch": 0.52,
"grad_norm": 0.8960602879524231,
"learning_rate": 0.00010232116539815558,
"loss": 1.1453,
"step": 220
},
{
"epoch": 0.52,
"grad_norm": 0.8716662526130676,
"learning_rate": 0.00010154752081407066,
"loss": 1.1906,
"step": 221
},
{
"epoch": 0.52,
"grad_norm": 1.0372726917266846,
"learning_rate": 0.00010077378357218021,
"loss": 1.2253,
"step": 222
},
{
"epoch": 0.52,
"grad_norm": 1.2147200107574463,
"learning_rate": 0.0001,
"loss": 1.1959,
"step": 223
},
{
"epoch": 0.53,
"grad_norm": 1.0374308824539185,
"learning_rate": 9.92262164278198e-05,
"loss": 1.5055,
"step": 224
},
{
"epoch": 0.53,
"grad_norm": 1.026852011680603,
"learning_rate": 9.845247918592937e-05,
"loss": 1.3998,
"step": 225
},
{
"epoch": 0.53,
"grad_norm": 1.0135536193847656,
"learning_rate": 9.767883460184443e-05,
"loss": 0.8342,
"step": 226
},
{
"epoch": 0.53,
"grad_norm": 0.6277703642845154,
"learning_rate": 9.69053289975329e-05,
"loss": 0.6104,
"step": 227
},
{
"epoch": 0.54,
"grad_norm": 0.8149964213371277,
"learning_rate": 9.613200868664112e-05,
"loss": 1.1625,
"step": 228
},
{
"epoch": 0.54,
"grad_norm": 1.0715911388397217,
"learning_rate": 9.53589199717212e-05,
"loss": 1.0454,
"step": 229
},
{
"epoch": 0.54,
"grad_norm": 0.9631131887435913,
"learning_rate": 9.458610914145826e-05,
"loss": 1.0737,
"step": 230
},
{
"epoch": 0.54,
"grad_norm": 0.8216869235038757,
"learning_rate": 9.381362246789917e-05,
"loss": 1.0467,
"step": 231
},
{
"epoch": 0.54,
"grad_norm": 1.1427477598190308,
"learning_rate": 9.304150620368188e-05,
"loss": 1.2994,
"step": 232
},
{
"epoch": 0.55,
"grad_norm": 0.8490440845489502,
"learning_rate": 9.226980657926614e-05,
"loss": 1.0697,
"step": 233
},
{
"epoch": 0.55,
"grad_norm": 1.339479684829712,
"learning_rate": 9.149856980016529e-05,
"loss": 1.6431,
"step": 234
},
{
"epoch": 0.55,
"grad_norm": 0.7970044016838074,
"learning_rate": 9.072784204417995e-05,
"loss": 1.0936,
"step": 235
},
{
"epoch": 0.55,
"grad_norm": 1.135980486869812,
"learning_rate": 8.995766945863277e-05,
"loss": 1.2341,
"step": 236
},
{
"epoch": 0.56,
"grad_norm": 0.663905680179596,
"learning_rate": 8.918809815760585e-05,
"loss": 0.8369,
"step": 237
},
{
"epoch": 0.56,
"grad_norm": 0.7740184664726257,
"learning_rate": 8.841917421917912e-05,
"loss": 1.0605,
"step": 238
},
{
"epoch": 0.56,
"grad_norm": 0.9601121544837952,
"learning_rate": 8.765094368267186e-05,
"loss": 1.1868,
"step": 239
},
{
"epoch": 0.56,
"grad_norm": 0.9471805095672607,
"learning_rate": 8.688345254588578e-05,
"loss": 1.1898,
"step": 240
},
{
"epoch": 0.57,
"grad_norm": 0.8799854516983032,
"learning_rate": 8.611674676235115e-05,
"loss": 1.0781,
"step": 241
},
{
"epoch": 0.57,
"grad_norm": 1.255386233329773,
"learning_rate": 8.535087223857508e-05,
"loss": 1.0936,
"step": 242
},
{
"epoch": 0.57,
"grad_norm": 0.8445800542831421,
"learning_rate": 8.458587483129316e-05,
"loss": 1.2126,
"step": 243
},
{
"epoch": 0.57,
"grad_norm": 0.7667735815048218,
"learning_rate": 8.382180034472353e-05,
"loss": 1.1693,
"step": 244
},
{
"epoch": 0.58,
"grad_norm": 0.8405752182006836,
"learning_rate": 8.305869452782446e-05,
"loss": 1.2401,
"step": 245
},
{
"epoch": 0.58,
"grad_norm": 1.1948215961456299,
"learning_rate": 8.229660307155518e-05,
"loss": 1.4077,
"step": 246
},
{
"epoch": 0.58,
"grad_norm": 1.0930763483047485,
"learning_rate": 8.153557160613998e-05,
"loss": 1.3625,
"step": 247
},
{
"epoch": 0.58,
"grad_norm": 0.8063607215881348,
"learning_rate": 8.077564569833632e-05,
"loss": 1.0265,
"step": 248
},
{
"epoch": 0.58,
"grad_norm": 0.8202766180038452,
"learning_rate": 8.00168708487063e-05,
"loss": 1.154,
"step": 249
},
{
"epoch": 0.59,
"grad_norm": 1.57820463180542,
"learning_rate": 7.92592924888925e-05,
"loss": 1.5653,
"step": 250
},
{
"epoch": 0.59,
"grad_norm": 0.9507820010185242,
"learning_rate": 7.85029559788976e-05,
"loss": 1.0407,
"step": 251
},
{
"epoch": 0.59,
"grad_norm": 0.890489935874939,
"learning_rate": 7.774790660436858e-05,
"loss": 1.2915,
"step": 252
},
{
"epoch": 0.59,
"grad_norm": 0.9816429018974304,
"learning_rate": 7.699418957388512e-05,
"loss": 1.3749,
"step": 253
},
{
"epoch": 0.6,
"grad_norm": 0.8766152262687683,
"learning_rate": 7.624185001625292e-05,
"loss": 1.0009,
"step": 254
},
{
"epoch": 0.6,
"grad_norm": 1.0196894407272339,
"learning_rate": 7.549093297780132e-05,
"loss": 1.2238,
"step": 255
},
{
"epoch": 0.6,
"grad_norm": 1.0334336757659912,
"learning_rate": 7.474148341968652e-05,
"loss": 0.8767,
"step": 256
},
{
"epoch": 0.6,
"grad_norm": 0.8634482026100159,
"learning_rate": 7.39935462151992e-05,
"loss": 1.4253,
"step": 257
},
{
"epoch": 0.61,
"grad_norm": 0.9882122278213501,
"learning_rate": 7.324716614707793e-05,
"loss": 1.3695,
"step": 258
},
{
"epoch": 0.61,
"grad_norm": 0.7892170548439026,
"learning_rate": 7.250238790482773e-05,
"loss": 1.1509,
"step": 259
},
{
"epoch": 0.61,
"grad_norm": 0.858100414276123,
"learning_rate": 7.175925608204428e-05,
"loss": 1.1307,
"step": 260
},
{
"epoch": 0.61,
"grad_norm": 1.1348087787628174,
"learning_rate": 7.101781517374398e-05,
"loss": 1.0939,
"step": 261
},
{
"epoch": 0.62,
"grad_norm": 1.0761414766311646,
"learning_rate": 7.027810957369957e-05,
"loss": 1.4693,
"step": 262
},
{
"epoch": 0.62,
"grad_norm": 0.971714198589325,
"learning_rate": 6.954018357178241e-05,
"loss": 1.333,
"step": 263
},
{
"epoch": 0.62,
"grad_norm": 0.748940646648407,
"learning_rate": 6.880408135131022e-05,
"loss": 0.9636,
"step": 264
},
{
"epoch": 0.62,
"grad_norm": 0.8127878904342651,
"learning_rate": 6.806984698640202e-05,
"loss": 0.8037,
"step": 265
},
{
"epoch": 0.62,
"grad_norm": 0.8631081581115723,
"learning_rate": 6.733752443933878e-05,
"loss": 0.977,
"step": 266
},
{
"epoch": 0.63,
"grad_norm": 0.8358796238899231,
"learning_rate": 6.660715755793154e-05,
"loss": 1.0804,
"step": 267
},
{
"epoch": 0.63,
"grad_norm": 0.9398666620254517,
"learning_rate": 6.587879007289576e-05,
"loss": 1.357,
"step": 268
},
{
"epoch": 0.63,
"grad_norm": 1.1066324710845947,
"learning_rate": 6.515246559523312e-05,
"loss": 1.2569,
"step": 269
},
{
"epoch": 0.63,
"grad_norm": 1.116248607635498,
"learning_rate": 6.442822761362015e-05,
"loss": 1.2156,
"step": 270
},
{
"epoch": 0.64,
"grad_norm": 1.0011835098266602,
"learning_rate": 6.370611949180457e-05,
"loss": 1.2736,
"step": 271
},
{
"epoch": 0.64,
"grad_norm": 0.8156257271766663,
"learning_rate": 6.298618446600856e-05,
"loss": 0.9655,
"step": 272
},
{
"epoch": 0.64,
"grad_norm": 0.8273128271102905,
"learning_rate": 6.22684656423404e-05,
"loss": 0.7496,
"step": 273
},
{
"epoch": 0.64,
"grad_norm": 0.855414628982544,
"learning_rate": 6.155300599421306e-05,
"loss": 1.1053,
"step": 274
},
{
"epoch": 0.65,
"grad_norm": 0.8847690224647522,
"learning_rate": 6.0839848359771536e-05,
"loss": 1.2419,
"step": 275
},
{
"epoch": 0.65,
"grad_norm": 0.7055469751358032,
"learning_rate": 6.012903543932766e-05,
"loss": 0.9169,
"step": 276
},
{
"epoch": 0.65,
"grad_norm": 1.0745937824249268,
"learning_rate": 5.9420609792803604e-05,
"loss": 1.2988,
"step": 277
},
{
"epoch": 0.65,
"grad_norm": 1.488973617553711,
"learning_rate": 5.871461383718344e-05,
"loss": 1.3937,
"step": 278
},
{
"epoch": 0.65,
"grad_norm": 0.8680241703987122,
"learning_rate": 5.801108984397354e-05,
"loss": 0.7092,
"step": 279
},
{
"epoch": 0.66,
"grad_norm": 0.7179112434387207,
"learning_rate": 5.7310079936671545e-05,
"loss": 0.9667,
"step": 280
},
{
"epoch": 0.66,
"grad_norm": 0.944363534450531,
"learning_rate": 5.6611626088244194e-05,
"loss": 1.2307,
"step": 281
},
{
"epoch": 0.66,
"grad_norm": 0.797068178653717,
"learning_rate": 5.59157701186142e-05,
"loss": 1.1412,
"step": 282
},
{
"epoch": 0.66,
"grad_norm": 0.9618858695030212,
"learning_rate": 5.522255369215622e-05,
"loss": 1.1069,
"step": 283
},
{
"epoch": 0.67,
"grad_norm": 1.047471284866333,
"learning_rate": 5.453201831520245e-05,
"loss": 1.0406,
"step": 284
},
{
"epoch": 0.67,
"grad_norm": 0.8067429065704346,
"learning_rate": 5.38442053335571e-05,
"loss": 1.1467,
"step": 285
},
{
"epoch": 0.67,
"grad_norm": 0.8198029398918152,
"learning_rate": 5.3159155930021e-05,
"loss": 1.1139,
"step": 286
},
{
"epoch": 0.67,
"grad_norm": 0.7728237509727478,
"learning_rate": 5.247691112192577e-05,
"loss": 0.8623,
"step": 287
},
{
"epoch": 0.68,
"grad_norm": 0.8685657978057861,
"learning_rate": 5.179751175867784e-05,
"loss": 1.0307,
"step": 288
},
{
"epoch": 0.68,
"grad_norm": 0.7406818866729736,
"learning_rate": 5.112099851931265e-05,
"loss": 0.9348,
"step": 289
},
{
"epoch": 0.68,
"grad_norm": 1.017289638519287,
"learning_rate": 5.044741191005908e-05,
"loss": 1.131,
"step": 290
},
{
"epoch": 0.68,
"grad_norm": 0.7423017024993896,
"learning_rate": 4.9776792261913896e-05,
"loss": 1.0436,
"step": 291
},
{
"epoch": 0.69,
"grad_norm": 0.7347959876060486,
"learning_rate": 4.910917972822713e-05,
"loss": 1.0401,
"step": 292
},
{
"epoch": 0.69,
"grad_norm": 0.9067183136940002,
"learning_rate": 4.844461428229782e-05,
"loss": 1.3648,
"step": 293
},
{
"epoch": 0.69,
"grad_norm": 0.8086903691291809,
"learning_rate": 4.7783135714980744e-05,
"loss": 1.1079,
"step": 294
},
{
"epoch": 0.69,
"grad_norm": 0.8241545557975769,
"learning_rate": 4.712478363230362e-05,
"loss": 1.3574,
"step": 295
},
{
"epoch": 0.69,
"grad_norm": 0.8495978713035583,
"learning_rate": 4.646959745309609e-05,
"loss": 1.152,
"step": 296
},
{
"epoch": 0.7,
"grad_norm": 0.8596299290657043,
"learning_rate": 4.581761640662927e-05,
"loss": 1.2126,
"step": 297
},
{
"epoch": 0.7,
"grad_norm": 0.9820694327354431,
"learning_rate": 4.516887953026691e-05,
"loss": 1.0377,
"step": 298
},
{
"epoch": 0.7,
"grad_norm": 0.8206368088722229,
"learning_rate": 4.452342566712818e-05,
"loss": 1.0879,
"step": 299
},
{
"epoch": 0.7,
"grad_norm": 0.9816787242889404,
"learning_rate": 4.388129346376178e-05,
"loss": 1.1499,
"step": 300
},
{
"epoch": 0.71,
"grad_norm": 0.8422172665596008,
"learning_rate": 4.3242521367832015e-05,
"loss": 1.0807,
"step": 301
},
{
"epoch": 0.71,
"grad_norm": 0.9412662386894226,
"learning_rate": 4.260714762581677e-05,
"loss": 1.067,
"step": 302
},
{
"epoch": 0.71,
"grad_norm": 0.8067646026611328,
"learning_rate": 4.197521028071765e-05,
"loss": 1.2295,
"step": 303
},
{
"epoch": 0.71,
"grad_norm": 1.0259385108947754,
"learning_rate": 4.13467471697817e-05,
"loss": 0.9919,
"step": 304
},
{
"epoch": 0.72,
"grad_norm": 0.8335386514663696,
"learning_rate": 4.0721795922236496e-05,
"loss": 1.0636,
"step": 305
},
{
"epoch": 0.72,
"grad_norm": 1.1450220346450806,
"learning_rate": 4.010039395703664e-05,
"loss": 1.1161,
"step": 306
},
{
"epoch": 0.72,
"grad_norm": 0.9241200089454651,
"learning_rate": 3.948257848062351e-05,
"loss": 1.2009,
"step": 307
},
{
"epoch": 0.72,
"grad_norm": 1.1173282861709595,
"learning_rate": 3.8868386484697417e-05,
"loss": 1.195,
"step": 308
},
{
"epoch": 0.73,
"grad_norm": 0.9357693791389465,
"learning_rate": 3.825785474400291e-05,
"loss": 1.0678,
"step": 309
},
{
"epoch": 0.73,
"grad_norm": 1.0329748392105103,
"learning_rate": 3.7651019814126654e-05,
"loss": 1.4416,
"step": 310
},
{
"epoch": 0.73,
"grad_norm": 0.9039399027824402,
"learning_rate": 3.7047918029308815e-05,
"loss": 1.0469,
"step": 311
},
{
"epoch": 0.73,
"grad_norm": 1.1105232238769531,
"learning_rate": 3.6448585500267485e-05,
"loss": 1.2242,
"step": 312
},
{
"epoch": 0.73,
"grad_norm": 0.9321856498718262,
"learning_rate": 3.5853058112036596e-05,
"loss": 1.2051,
"step": 313
},
{
"epoch": 0.74,
"grad_norm": 0.8780171871185303,
"learning_rate": 3.5261371521817244e-05,
"loss": 1.0514,
"step": 314
},
{
"epoch": 0.74,
"grad_norm": 1.3793842792510986,
"learning_rate": 3.467356115684284e-05,
"loss": 1.2994,
"step": 315
},
{
"epoch": 0.74,
"grad_norm": 0.9189188480377197,
"learning_rate": 3.408966221225773e-05,
"loss": 0.9158,
"step": 316
},
{
"epoch": 0.74,
"grad_norm": 0.979479193687439,
"learning_rate": 3.350970964900998e-05,
"loss": 1.0156,
"step": 317
},
{
"epoch": 0.75,
"grad_norm": 1.0084733963012695,
"learning_rate": 3.293373819175816e-05,
"loss": 1.1564,
"step": 318
},
{
"epoch": 0.75,
"grad_norm": 0.708268404006958,
"learning_rate": 3.236178232679202e-05,
"loss": 0.6927,
"step": 319
},
{
"epoch": 0.75,
"grad_norm": 0.8528488278388977,
"learning_rate": 3.1793876299967816e-05,
"loss": 1.2442,
"step": 320
},
{
"epoch": 0.75,
"grad_norm": 0.9393438100814819,
"learning_rate": 3.123005411465766e-05,
"loss": 1.3178,
"step": 321
},
{
"epoch": 0.75,
"eval_loss": 1.1110302209854126,
"eval_runtime": 6.0903,
"eval_samples_per_second": 16.42,
"eval_steps_per_second": 16.42,
"step": 321
},
{
"epoch": 0.76,
"grad_norm": 0.8227856159210205,
"learning_rate": 3.0670349529713816e-05,
"loss": 1.0518,
"step": 322
},
{
"epoch": 0.76,
"grad_norm": 0.8559508323669434,
"learning_rate": 3.0114796057447026e-05,
"loss": 1.0809,
"step": 323
},
{
"epoch": 0.76,
"grad_norm": 0.8127685189247131,
"learning_rate": 2.9563426961620367e-05,
"loss": 1.2037,
"step": 324
},
{
"epoch": 0.76,
"grad_norm": 0.8787775039672852,
"learning_rate": 2.901627525545726e-05,
"loss": 1.0329,
"step": 325
},
{
"epoch": 0.77,
"grad_norm": 0.9373772740364075,
"learning_rate": 2.8473373699664997e-05,
"loss": 1.2817,
"step": 326
},
{
"epoch": 0.77,
"grad_norm": 0.8289972543716431,
"learning_rate": 2.793475480047303e-05,
"loss": 0.9784,
"step": 327
},
{
"epoch": 0.77,
"grad_norm": 1.0169614553451538,
"learning_rate": 2.7400450807686938e-05,
"loss": 1.2231,
"step": 328
},
{
"epoch": 0.77,
"grad_norm": 0.8996503949165344,
"learning_rate": 2.687049371275705e-05,
"loss": 1.0073,
"step": 329
},
{
"epoch": 0.77,
"grad_norm": 1.0333868265151978,
"learning_rate": 2.6344915246863412e-05,
"loss": 1.2796,
"step": 330
},
{
"epoch": 0.78,
"grad_norm": 0.8676667213439941,
"learning_rate": 2.582374687901553e-05,
"loss": 1.3943,
"step": 331
},
{
"epoch": 0.78,
"grad_norm": 0.9073312878608704,
"learning_rate": 2.5307019814168342e-05,
"loss": 1.4125,
"step": 332
},
{
"epoch": 0.78,
"grad_norm": 0.7397946119308472,
"learning_rate": 2.4794764991353748e-05,
"loss": 0.9884,
"step": 333
},
{
"epoch": 0.78,
"grad_norm": 0.8001339435577393,
"learning_rate": 2.4287013081828257e-05,
"loss": 1.0607,
"step": 334
},
{
"epoch": 0.79,
"grad_norm": 0.7958093881607056,
"learning_rate": 2.3783794487236365e-05,
"loss": 1.0917,
"step": 335
},
{
"epoch": 0.79,
"grad_norm": 0.7405944466590881,
"learning_rate": 2.328513933779034e-05,
"loss": 1.1819,
"step": 336
},
{
"epoch": 0.79,
"grad_norm": 0.8886005878448486,
"learning_rate": 2.2791077490466262e-05,
"loss": 1.1103,
"step": 337
},
{
"epoch": 0.79,
"grad_norm": 0.9879472255706787,
"learning_rate": 2.2301638527216194e-05,
"loss": 1.5232,
"step": 338
},
{
"epoch": 0.8,
"grad_norm": 0.6994455456733704,
"learning_rate": 2.181685175319702e-05,
"loss": 1.0496,
"step": 339
},
{
"epoch": 0.8,
"grad_norm": 1.373903512954712,
"learning_rate": 2.1336746195015846e-05,
"loss": 1.2936,
"step": 340
},
{
"epoch": 0.8,
"grad_norm": 1.3520807027816772,
"learning_rate": 2.0861350598991945e-05,
"loss": 1.5331,
"step": 341
},
{
"epoch": 0.8,
"grad_norm": 0.7715508341789246,
"learning_rate": 2.0390693429435627e-05,
"loss": 1.0536,
"step": 342
},
{
"epoch": 0.81,
"grad_norm": 0.9478192925453186,
"learning_rate": 1.992480286694397e-05,
"loss": 1.3032,
"step": 343
},
{
"epoch": 0.81,
"grad_norm": 0.8158330917358398,
"learning_rate": 1.946370680671341e-05,
"loss": 1.1646,
"step": 344
},
{
"epoch": 0.81,
"grad_norm": 1.5716131925582886,
"learning_rate": 1.90074328568696e-05,
"loss": 1.8497,
"step": 345
},
{
"epoch": 0.81,
"grad_norm": 0.9847422242164612,
"learning_rate": 1.85560083368143e-05,
"loss": 1.181,
"step": 346
},
{
"epoch": 0.81,
"grad_norm": 1.0614407062530518,
"learning_rate": 1.8109460275589773e-05,
"loss": 1.3074,
"step": 347
},
{
"epoch": 0.82,
"grad_norm": 0.9004321098327637,
"learning_rate": 1.766781541026018e-05,
"loss": 1.0441,
"step": 348
},
{
"epoch": 0.82,
"grad_norm": 1.0964152812957764,
"learning_rate": 1.7231100184310956e-05,
"loss": 1.1583,
"step": 349
},
{
"epoch": 0.82,
"grad_norm": 0.939444363117218,
"learning_rate": 1.679934074606533e-05,
"loss": 1.2871,
"step": 350
},
{
"epoch": 0.82,
"grad_norm": 0.8655087351799011,
"learning_rate": 1.6372562947118763e-05,
"loss": 1.1802,
"step": 351
},
{
"epoch": 0.83,
"grad_norm": 0.8392437696456909,
"learning_rate": 1.5950792340791043e-05,
"loss": 1.0773,
"step": 352
},
{
"epoch": 0.83,
"grad_norm": 0.856883704662323,
"learning_rate": 1.5534054180596415e-05,
"loss": 1.111,
"step": 353
},
{
"epoch": 0.83,
"grad_norm": 0.9138004183769226,
"learning_rate": 1.5122373418731306e-05,
"loss": 0.9742,
"step": 354
},
{
"epoch": 0.83,
"grad_norm": 0.8892737627029419,
"learning_rate": 1.4715774704580453e-05,
"loss": 1.3606,
"step": 355
},
{
"epoch": 0.84,
"grad_norm": 0.8643128275871277,
"learning_rate": 1.4314282383241096e-05,
"loss": 1.2462,
"step": 356
},
{
"epoch": 0.84,
"grad_norm": 0.8623273372650146,
"learning_rate": 1.3917920494065029e-05,
"loss": 0.9758,
"step": 357
},
{
"epoch": 0.84,
"grad_norm": 0.9717140793800354,
"learning_rate": 1.3526712769219618e-05,
"loss": 1.1752,
"step": 358
},
{
"epoch": 0.84,
"grad_norm": 1.449154257774353,
"learning_rate": 1.3140682632266543e-05,
"loss": 0.974,
"step": 359
},
{
"epoch": 0.85,
"grad_norm": 0.8342477083206177,
"learning_rate": 1.2759853196759453e-05,
"loss": 1.0768,
"step": 360
},
{
"epoch": 0.85,
"grad_norm": 0.699179470539093,
"learning_rate": 1.2384247264859972e-05,
"loss": 1.0079,
"step": 361
},
{
"epoch": 0.85,
"grad_norm": 0.9927258491516113,
"learning_rate": 1.201388732597255e-05,
"loss": 1.16,
"step": 362
},
{
"epoch": 0.85,
"grad_norm": 0.8921307325363159,
"learning_rate": 1.1648795555397719e-05,
"loss": 1.1092,
"step": 363
},
{
"epoch": 0.85,
"grad_norm": 0.8358816504478455,
"learning_rate": 1.1288993813004467e-05,
"loss": 0.9703,
"step": 364
},
{
"epoch": 0.86,
"grad_norm": 1.0156934261322021,
"learning_rate": 1.0934503641921402e-05,
"loss": 1.3109,
"step": 365
},
{
"epoch": 0.86,
"grad_norm": 0.7621612548828125,
"learning_rate": 1.0585346267246743e-05,
"loss": 0.5513,
"step": 366
},
{
"epoch": 0.86,
"grad_norm": 1.0127166509628296,
"learning_rate": 1.0241542594777576e-05,
"loss": 0.9964,
"step": 367
},
{
"epoch": 0.86,
"grad_norm": 0.8973998427391052,
"learning_rate": 9.903113209758096e-06,
"loss": 1.1234,
"step": 368
},
{
"epoch": 0.87,
"grad_norm": 1.2160240411758423,
"learning_rate": 9.570078375647006e-06,
"loss": 1.8291,
"step": 369
},
{
"epoch": 0.87,
"grad_norm": 0.9975070357322693,
"learning_rate": 9.242458032904311e-06,
"loss": 1.0111,
"step": 370
},
{
"epoch": 0.87,
"grad_norm": 1.1726033687591553,
"learning_rate": 8.92027179779732e-06,
"loss": 1.3626,
"step": 371
},
{
"epoch": 0.87,
"grad_norm": 0.8554450869560242,
"learning_rate": 8.603538961226232e-06,
"loss": 1.2957,
"step": 372
},
{
"epoch": 0.88,
"grad_norm": 0.8581518530845642,
"learning_rate": 8.29227848756895e-06,
"loss": 1.1524,
"step": 373
},
{
"epoch": 0.88,
"grad_norm": 0.834023118019104,
"learning_rate": 7.986509013545673e-06,
"loss": 1.0957,
"step": 374
},
{
"epoch": 0.88,
"grad_norm": 0.9786900281906128,
"learning_rate": 7.686248847103072e-06,
"loss": 0.9576,
"step": 375
},
{
"epoch": 0.88,
"grad_norm": 1.0205256938934326,
"learning_rate": 7.3915159663179075e-06,
"loss": 1.4658,
"step": 376
},
{
"epoch": 0.88,
"grad_norm": 1.0132267475128174,
"learning_rate": 7.102328018320858e-06,
"loss": 1.1489,
"step": 377
},
{
"epoch": 0.89,
"grad_norm": 0.8851252198219299,
"learning_rate": 6.818702318239689e-06,
"loss": 1.1875,
"step": 378
},
{
"epoch": 0.89,
"grad_norm": 0.658548891544342,
"learning_rate": 6.540655848162602e-06,
"loss": 0.7974,
"step": 379
},
{
"epoch": 0.89,
"grad_norm": 0.877100944519043,
"learning_rate": 6.268205256121396e-06,
"loss": 1.2753,
"step": 380
},
{
"epoch": 0.89,
"grad_norm": 0.9094898104667664,
"learning_rate": 6.001366855094748e-06,
"loss": 1.2567,
"step": 381
},
{
"epoch": 0.9,
"grad_norm": 1.538279414176941,
"learning_rate": 5.7401566220313005e-06,
"loss": 0.9588,
"step": 382
},
{
"epoch": 0.9,
"grad_norm": 0.7195213437080383,
"learning_rate": 5.484590196893247e-06,
"loss": 0.7562,
"step": 383
},
{
"epoch": 0.9,
"grad_norm": 0.9021754860877991,
"learning_rate": 5.2346828817197655e-06,
"loss": 1.3057,
"step": 384
},
{
"epoch": 0.9,
"grad_norm": 0.8472947478294373,
"learning_rate": 4.990449639710815e-06,
"loss": 1.308,
"step": 385
},
{
"epoch": 0.91,
"grad_norm": 0.9867368936538696,
"learning_rate": 4.7519050943312325e-06,
"loss": 1.2082,
"step": 386
},
{
"epoch": 0.91,
"grad_norm": 0.7588489055633545,
"learning_rate": 4.5190635284352075e-06,
"loss": 1.1311,
"step": 387
},
{
"epoch": 0.91,
"grad_norm": 0.763214111328125,
"learning_rate": 4.291938883411007e-06,
"loss": 1.2086,
"step": 388
},
{
"epoch": 0.91,
"grad_norm": 0.8648312091827393,
"learning_rate": 4.070544758346273e-06,
"loss": 1.005,
"step": 389
},
{
"epoch": 0.92,
"grad_norm": 1.0920186042785645,
"learning_rate": 3.85489440921376e-06,
"loss": 1.0133,
"step": 390
},
{
"epoch": 0.92,
"grad_norm": 0.7375323176383972,
"learning_rate": 3.6450007480777093e-06,
"loss": 0.9391,
"step": 391
},
{
"epoch": 0.92,
"grad_norm": 0.8165771961212158,
"learning_rate": 3.440876342320609e-06,
"loss": 1.0479,
"step": 392
},
{
"epoch": 0.92,
"grad_norm": 0.8869404792785645,
"learning_rate": 3.2425334138908583e-06,
"loss": 1.08,
"step": 393
},
{
"epoch": 0.92,
"grad_norm": 0.7623941898345947,
"learning_rate": 3.049983838570858e-06,
"loss": 1.1899,
"step": 394
},
{
"epoch": 0.93,
"grad_norm": 0.9894457459449768,
"learning_rate": 2.863239145266028e-06,
"loss": 1.3285,
"step": 395
},
{
"epoch": 0.93,
"grad_norm": 0.9151179194450378,
"learning_rate": 2.682310515314512e-06,
"loss": 1.0843,
"step": 396
},
{
"epoch": 0.93,
"grad_norm": 0.9756556749343872,
"learning_rate": 2.5072087818176382e-06,
"loss": 1.3407,
"step": 397
},
{
"epoch": 0.93,
"grad_norm": 0.8172594308853149,
"learning_rate": 2.3379444289913342e-06,
"loss": 1.0445,
"step": 398
},
{
"epoch": 0.94,
"grad_norm": 1.0837291479110718,
"learning_rate": 2.174527591538367e-06,
"loss": 1.2118,
"step": 399
},
{
"epoch": 0.94,
"grad_norm": 0.9625768661499023,
"learning_rate": 2.016968054041546e-06,
"loss": 1.1475,
"step": 400
},
{
"epoch": 0.94,
"grad_norm": 0.9723854660987854,
"learning_rate": 1.8652752503778404e-06,
"loss": 1.1693,
"step": 401
},
{
"epoch": 0.94,
"grad_norm": 1.2548187971115112,
"learning_rate": 1.7194582631535617e-06,
"loss": 1.3045,
"step": 402
},
{
"epoch": 0.95,
"grad_norm": 0.6179590821266174,
"learning_rate": 1.5795258231605103e-06,
"loss": 0.4655,
"step": 403
},
{
"epoch": 0.95,
"grad_norm": 0.8926462531089783,
"learning_rate": 1.4454863088532388e-06,
"loss": 0.8854,
"step": 404
},
{
"epoch": 0.95,
"grad_norm": 1.0097955465316772,
"learning_rate": 1.317347745847386e-06,
"loss": 1.3568,
"step": 405
},
{
"epoch": 0.95,
"grad_norm": 0.8205281496047974,
"learning_rate": 1.19511780643915e-06,
"loss": 0.9675,
"step": 406
},
{
"epoch": 0.96,
"grad_norm": 0.9589681029319763,
"learning_rate": 1.0788038091458897e-06,
"loss": 1.1202,
"step": 407
},
{
"epoch": 0.96,
"grad_norm": 0.8816035985946655,
"learning_rate": 9.684127182679526e-07,
"loss": 1.1746,
"step": 408
},
{
"epoch": 0.96,
"grad_norm": 0.8051217198371887,
"learning_rate": 8.639511434716863e-07,
"loss": 0.975,
"step": 409
},
{
"epoch": 0.96,
"grad_norm": 0.8666868209838867,
"learning_rate": 7.654253393936439e-07,
"loss": 1.2602,
"step": 410
},
{
"epoch": 0.96,
"grad_norm": 0.9826870560646057,
"learning_rate": 6.728412052661504e-07,
"loss": 1.1426,
"step": 411
},
{
"epoch": 0.97,
"grad_norm": 1.006511926651001,
"learning_rate": 5.862042845640403e-07,
"loss": 1.1518,
"step": 412
},
{
"epoch": 0.97,
"grad_norm": 0.7082429528236389,
"learning_rate": 5.055197646727572e-07,
"loss": 0.9126,
"step": 413
},
{
"epoch": 0.97,
"grad_norm": 1.163749098777771,
"learning_rate": 4.307924765777682e-07,
"loss": 1.3254,
"step": 414
},
{
"epoch": 0.97,
"grad_norm": 0.9457451105117798,
"learning_rate": 3.620268945752847e-07,
"loss": 1.605,
"step": 415
},
{
"epoch": 0.98,
"grad_norm": 0.7310549020767212,
"learning_rate": 2.9922713600439854e-07,
"loss": 0.9009,
"step": 416
},
{
"epoch": 0.98,
"grad_norm": 1.1131972074508667,
"learning_rate": 2.423969610005017e-07,
"loss": 1.1344,
"step": 417
},
{
"epoch": 0.98,
"grad_norm": 0.9373881816864014,
"learning_rate": 1.915397722702217e-07,
"loss": 1.0736,
"step": 418
},
{
"epoch": 0.98,
"grad_norm": 0.819059431552887,
"learning_rate": 1.4665861488761813e-07,
"loss": 0.8256,
"step": 419
},
{
"epoch": 0.99,
"grad_norm": 0.8947961926460266,
"learning_rate": 1.0775617611189503e-07,
"loss": 1.3029,
"step": 420
},
{
"epoch": 0.99,
"grad_norm": 0.7374153137207031,
"learning_rate": 7.483478522649634e-08,
"loss": 1.0694,
"step": 421
},
{
"epoch": 0.99,
"grad_norm": 0.8008151650428772,
"learning_rate": 4.789641339963957e-08,
"loss": 1.0821,
"step": 422
},
{
"epoch": 0.99,
"grad_norm": 1.0294992923736572,
"learning_rate": 2.6942673566265897e-08,
"loss": 1.3806,
"step": 423
},
{
"epoch": 1.0,
"grad_norm": 0.9338717460632324,
"learning_rate": 1.1974820331517312e-08,
"loss": 1.3182,
"step": 424
},
{
"epoch": 1.0,
"grad_norm": 0.9727728366851807,
"learning_rate": 2.9937498955745493e-09,
"loss": 1.3143,
"step": 425
},
{
"epoch": 1.0,
"grad_norm": 0.943352997303009,
"learning_rate": 0.0,
"loss": 0.9565,
"step": 426
}
],
"logging_steps": 1,
"max_steps": 426,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 7758429166387200.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}