{
"best_metric": 0.007892865687608719,
"best_model_checkpoint": "/home/paperspace/Data/models/spinny/llm3br256/checkpoint-250",
"epoch": 4.971098265895954,
"eval_steps": 5,
"global_step": 430,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011560693641618497,
"grad_norm": 0.15129172801971436,
"learning_rate": 2.325581395348837e-06,
"loss": 0.068,
"step": 1
},
{
"epoch": 0.023121387283236993,
"grad_norm": 0.14174357056617737,
"learning_rate": 4.651162790697674e-06,
"loss": 0.0635,
"step": 2
},
{
"epoch": 0.03468208092485549,
"grad_norm": 0.14430448412895203,
"learning_rate": 6.976744186046512e-06,
"loss": 0.0649,
"step": 3
},
{
"epoch": 0.046242774566473986,
"grad_norm": 0.1410493552684784,
"learning_rate": 9.302325581395349e-06,
"loss": 0.063,
"step": 4
},
{
"epoch": 0.057803468208092484,
"grad_norm": 0.13995003700256348,
"learning_rate": 1.1627906976744187e-05,
"loss": 0.0598,
"step": 5
},
{
"epoch": 0.057803468208092484,
"eval_loss": 0.05565303936600685,
"eval_runtime": 11.8576,
"eval_samples_per_second": 4.217,
"eval_steps_per_second": 1.096,
"step": 5
},
{
"epoch": 0.06936416184971098,
"grad_norm": 0.12226737290620804,
"learning_rate": 1.3953488372093024e-05,
"loss": 0.0533,
"step": 6
},
{
"epoch": 0.08092485549132948,
"grad_norm": 0.10596886277198792,
"learning_rate": 1.6279069767441862e-05,
"loss": 0.0544,
"step": 7
},
{
"epoch": 0.09248554913294797,
"grad_norm": 0.07781537622213364,
"learning_rate": 1.8604651162790697e-05,
"loss": 0.0448,
"step": 8
},
{
"epoch": 0.10404624277456648,
"grad_norm": 0.06554879248142242,
"learning_rate": 2.0930232558139536e-05,
"loss": 0.0373,
"step": 9
},
{
"epoch": 0.11560693641618497,
"grad_norm": 0.08980869501829147,
"learning_rate": 2.3255813953488374e-05,
"loss": 0.0388,
"step": 10
},
{
"epoch": 0.11560693641618497,
"eval_loss": 0.034586433321237564,
"eval_runtime": 10.0247,
"eval_samples_per_second": 4.988,
"eval_steps_per_second": 1.297,
"step": 10
},
{
"epoch": 0.12716763005780346,
"grad_norm": 0.08015415817499161,
"learning_rate": 2.5581395348837212e-05,
"loss": 0.0393,
"step": 11
},
{
"epoch": 0.13872832369942195,
"grad_norm": 0.06148159131407738,
"learning_rate": 2.7906976744186048e-05,
"loss": 0.0377,
"step": 12
},
{
"epoch": 0.15028901734104047,
"grad_norm": 0.04883963614702225,
"learning_rate": 3.0232558139534883e-05,
"loss": 0.0356,
"step": 13
},
{
"epoch": 0.16184971098265896,
"grad_norm": 0.04954546317458153,
"learning_rate": 3.2558139534883724e-05,
"loss": 0.0363,
"step": 14
},
{
"epoch": 0.17341040462427745,
"grad_norm": 0.041509971022605896,
"learning_rate": 3.488372093023256e-05,
"loss": 0.0275,
"step": 15
},
{
"epoch": 0.17341040462427745,
"eval_loss": 0.027632026001811028,
"eval_runtime": 10.0235,
"eval_samples_per_second": 4.988,
"eval_steps_per_second": 1.297,
"step": 15
},
{
"epoch": 0.18497109826589594,
"grad_norm": 0.041887976229190826,
"learning_rate": 3.7209302325581394e-05,
"loss": 0.0303,
"step": 16
},
{
"epoch": 0.19653179190751446,
"grad_norm": 0.03965295851230621,
"learning_rate": 3.953488372093023e-05,
"loss": 0.0284,
"step": 17
},
{
"epoch": 0.20809248554913296,
"grad_norm": 0.03458382189273834,
"learning_rate": 4.186046511627907e-05,
"loss": 0.03,
"step": 18
},
{
"epoch": 0.21965317919075145,
"grad_norm": 0.03138720989227295,
"learning_rate": 4.418604651162791e-05,
"loss": 0.0242,
"step": 19
},
{
"epoch": 0.23121387283236994,
"grad_norm": 0.02681548520922661,
"learning_rate": 4.651162790697675e-05,
"loss": 0.0218,
"step": 20
},
{
"epoch": 0.23121387283236994,
"eval_loss": 0.02280299924314022,
"eval_runtime": 10.0675,
"eval_samples_per_second": 4.967,
"eval_steps_per_second": 1.291,
"step": 20
},
{
"epoch": 0.24277456647398843,
"grad_norm": 0.03174450621008873,
"learning_rate": 4.883720930232558e-05,
"loss": 0.0235,
"step": 21
},
{
"epoch": 0.2543352601156069,
"grad_norm": 0.033451005816459656,
"learning_rate": 5.1162790697674425e-05,
"loss": 0.0291,
"step": 22
},
{
"epoch": 0.2658959537572254,
"grad_norm": 0.03227928280830383,
"learning_rate": 5.348837209302326e-05,
"loss": 0.0254,
"step": 23
},
{
"epoch": 0.2774566473988439,
"grad_norm": 0.03783709928393364,
"learning_rate": 5.5813953488372095e-05,
"loss": 0.0256,
"step": 24
},
{
"epoch": 0.28901734104046245,
"grad_norm": 0.03228365629911423,
"learning_rate": 5.8139534883720933e-05,
"loss": 0.0236,
"step": 25
},
{
"epoch": 0.28901734104046245,
"eval_loss": 0.020257851108908653,
"eval_runtime": 10.0419,
"eval_samples_per_second": 4.979,
"eval_steps_per_second": 1.295,
"step": 25
},
{
"epoch": 0.30057803468208094,
"grad_norm": 0.02823326177895069,
"learning_rate": 6.0465116279069765e-05,
"loss": 0.0229,
"step": 26
},
{
"epoch": 0.31213872832369943,
"grad_norm": 0.02882234752178192,
"learning_rate": 6.27906976744186e-05,
"loss": 0.0226,
"step": 27
},
{
"epoch": 0.3236994219653179,
"grad_norm": 0.02527730166912079,
"learning_rate": 6.511627906976745e-05,
"loss": 0.0196,
"step": 28
},
{
"epoch": 0.3352601156069364,
"grad_norm": 0.026926985010504723,
"learning_rate": 6.744186046511628e-05,
"loss": 0.0182,
"step": 29
},
{
"epoch": 0.3468208092485549,
"grad_norm": 0.02383883111178875,
"learning_rate": 6.976744186046513e-05,
"loss": 0.0182,
"step": 30
},
{
"epoch": 0.3468208092485549,
"eval_loss": 0.01791333220899105,
"eval_runtime": 10.1038,
"eval_samples_per_second": 4.949,
"eval_steps_per_second": 1.287,
"step": 30
},
{
"epoch": 0.3583815028901734,
"grad_norm": 0.020905988290905952,
"learning_rate": 7.209302325581396e-05,
"loss": 0.0173,
"step": 31
},
{
"epoch": 0.3699421965317919,
"grad_norm": 0.021837348118424416,
"learning_rate": 7.441860465116279e-05,
"loss": 0.0186,
"step": 32
},
{
"epoch": 0.3815028901734104,
"grad_norm": 0.022534186020493507,
"learning_rate": 7.674418604651163e-05,
"loss": 0.0192,
"step": 33
},
{
"epoch": 0.3930635838150289,
"grad_norm": 0.021404536440968513,
"learning_rate": 7.906976744186047e-05,
"loss": 0.0163,
"step": 34
},
{
"epoch": 0.4046242774566474,
"grad_norm": 0.02087543159723282,
"learning_rate": 8.139534883720931e-05,
"loss": 0.019,
"step": 35
},
{
"epoch": 0.4046242774566474,
"eval_loss": 0.016155000776052475,
"eval_runtime": 10.0056,
"eval_samples_per_second": 4.997,
"eval_steps_per_second": 1.299,
"step": 35
},
{
"epoch": 0.4161849710982659,
"grad_norm": 0.023041941225528717,
"learning_rate": 8.372093023255814e-05,
"loss": 0.0157,
"step": 36
},
{
"epoch": 0.4277456647398844,
"grad_norm": 0.020684899762272835,
"learning_rate": 8.604651162790697e-05,
"loss": 0.0154,
"step": 37
},
{
"epoch": 0.4393063583815029,
"grad_norm": 0.019204530864953995,
"learning_rate": 8.837209302325582e-05,
"loss": 0.0142,
"step": 38
},
{
"epoch": 0.4508670520231214,
"grad_norm": 0.019893651828169823,
"learning_rate": 9.069767441860465e-05,
"loss": 0.0155,
"step": 39
},
{
"epoch": 0.4624277456647399,
"grad_norm": 0.022773414850234985,
"learning_rate": 9.30232558139535e-05,
"loss": 0.017,
"step": 40
},
{
"epoch": 0.4624277456647399,
"eval_loss": 0.01472202967852354,
"eval_runtime": 10.018,
"eval_samples_per_second": 4.991,
"eval_steps_per_second": 1.298,
"step": 40
},
{
"epoch": 0.47398843930635837,
"grad_norm": 0.021773461252450943,
"learning_rate": 9.534883720930233e-05,
"loss": 0.0161,
"step": 41
},
{
"epoch": 0.48554913294797686,
"grad_norm": 0.023095447570085526,
"learning_rate": 9.767441860465116e-05,
"loss": 0.0146,
"step": 42
},
{
"epoch": 0.49710982658959535,
"grad_norm": 0.0202298853546381,
"learning_rate": 0.0001,
"loss": 0.015,
"step": 43
},
{
"epoch": 0.5086705202312138,
"grad_norm": 0.01809552311897278,
"learning_rate": 9.999835253787473e-05,
"loss": 0.0149,
"step": 44
},
{
"epoch": 0.5202312138728323,
"grad_norm": 0.019870778545737267,
"learning_rate": 9.999341026006419e-05,
"loss": 0.0147,
"step": 45
},
{
"epoch": 0.5202312138728323,
"eval_loss": 0.013716931454837322,
"eval_runtime": 10.0201,
"eval_samples_per_second": 4.99,
"eval_steps_per_second": 1.297,
"step": 45
},
{
"epoch": 0.5317919075144508,
"grad_norm": 0.017238808795809746,
"learning_rate": 9.998517349225698e-05,
"loss": 0.0107,
"step": 46
},
{
"epoch": 0.5433526011560693,
"grad_norm": 0.022697314620018005,
"learning_rate": 9.997364277724361e-05,
"loss": 0.0149,
"step": 47
},
{
"epoch": 0.5549132947976878,
"grad_norm": 0.023472866043448448,
"learning_rate": 9.99588188748808e-05,
"loss": 0.0144,
"step": 48
},
{
"epoch": 0.5664739884393064,
"grad_norm": 0.017129473388195038,
"learning_rate": 9.994070276204116e-05,
"loss": 0.0126,
"step": 49
},
{
"epoch": 0.5780346820809249,
"grad_norm": 0.01756882295012474,
"learning_rate": 9.991929563254914e-05,
"loss": 0.0118,
"step": 50
},
{
"epoch": 0.5780346820809249,
"eval_loss": 0.013151152059435844,
"eval_runtime": 10.0009,
"eval_samples_per_second": 5.0,
"eval_steps_per_second": 1.3,
"step": 50
},
{
"epoch": 0.5895953757225434,
"grad_norm": 0.019612474367022514,
"learning_rate": 9.989459889710213e-05,
"loss": 0.0144,
"step": 51
},
{
"epoch": 0.6011560693641619,
"grad_norm": 0.016709089279174805,
"learning_rate": 9.986661418317759e-05,
"loss": 0.0119,
"step": 52
},
{
"epoch": 0.6127167630057804,
"grad_norm": 0.023054232820868492,
"learning_rate": 9.983534333492575e-05,
"loss": 0.0137,
"step": 53
},
{
"epoch": 0.6242774566473989,
"grad_norm": 0.02029217965900898,
"learning_rate": 9.980078841304816e-05,
"loss": 0.014,
"step": 54
},
{
"epoch": 0.6358381502890174,
"grad_norm": 0.018532264977693558,
"learning_rate": 9.976295169466178e-05,
"loss": 0.0107,
"step": 55
},
{
"epoch": 0.6358381502890174,
"eval_loss": 0.012713148258626461,
"eval_runtime": 10.0087,
"eval_samples_per_second": 4.996,
"eval_steps_per_second": 1.299,
"step": 55
},
{
"epoch": 0.6473988439306358,
"grad_norm": 0.020468739792704582,
"learning_rate": 9.97218356731491e-05,
"loss": 0.0146,
"step": 56
},
{
"epoch": 0.6589595375722543,
"grad_norm": 0.01887972466647625,
"learning_rate": 9.967744305799357e-05,
"loss": 0.0126,
"step": 57
},
{
"epoch": 0.6705202312138728,
"grad_norm": 0.02040836587548256,
"learning_rate": 9.962977677460132e-05,
"loss": 0.0126,
"step": 58
},
{
"epoch": 0.6820809248554913,
"grad_norm": 0.02685651369392872,
"learning_rate": 9.957883996410821e-05,
"loss": 0.0122,
"step": 59
},
{
"epoch": 0.6936416184971098,
"grad_norm": 0.02112666517496109,
"learning_rate": 9.952463598317285e-05,
"loss": 0.016,
"step": 60
},
{
"epoch": 0.6936416184971098,
"eval_loss": 0.012294010259211063,
"eval_runtime": 10.0073,
"eval_samples_per_second": 4.996,
"eval_steps_per_second": 1.299,
"step": 60
},
{
"epoch": 0.7052023121387283,
"grad_norm": 0.020788514986634254,
"learning_rate": 9.946716840375551e-05,
"loss": 0.0106,
"step": 61
},
{
"epoch": 0.7167630057803468,
"grad_norm": 0.01940411888062954,
"learning_rate": 9.940644101288259e-05,
"loss": 0.0098,
"step": 62
},
{
"epoch": 0.7283236994219653,
"grad_norm": 0.02072029560804367,
"learning_rate": 9.934245781239714e-05,
"loss": 0.0142,
"step": 63
},
{
"epoch": 0.7398843930635838,
"grad_norm": 0.017221063375473022,
"learning_rate": 9.927522301869515e-05,
"loss": 0.0114,
"step": 64
},
{
"epoch": 0.7514450867052023,
"grad_norm": 0.026088256388902664,
"learning_rate": 9.920474106244763e-05,
"loss": 0.0144,
"step": 65
},
{
"epoch": 0.7514450867052023,
"eval_loss": 0.011602817103266716,
"eval_runtime": 10.0191,
"eval_samples_per_second": 4.99,
"eval_steps_per_second": 1.298,
"step": 65
},
{
"epoch": 0.7630057803468208,
"grad_norm": 0.016981270164251328,
"learning_rate": 9.91310165883088e-05,
"loss": 0.0136,
"step": 66
},
{
"epoch": 0.7745664739884393,
"grad_norm": 0.017894290387630463,
"learning_rate": 9.905405445460972e-05,
"loss": 0.0113,
"step": 67
},
{
"epoch": 0.7861271676300579,
"grad_norm": 0.017901500687003136,
"learning_rate": 9.897385973303845e-05,
"loss": 0.0133,
"step": 68
},
{
"epoch": 0.7976878612716763,
"grad_norm": 0.018053261563181877,
"learning_rate": 9.889043770830566e-05,
"loss": 0.0108,
"step": 69
},
{
"epoch": 0.8092485549132948,
"grad_norm": 0.023738127201795578,
"learning_rate": 9.880379387779637e-05,
"loss": 0.0119,
"step": 70
},
{
"epoch": 0.8092485549132948,
"eval_loss": 0.011262123472988605,
"eval_runtime": 10.0281,
"eval_samples_per_second": 4.986,
"eval_steps_per_second": 1.296,
"step": 70
},
{
"epoch": 0.8208092485549133,
"grad_norm": 0.01628166250884533,
"learning_rate": 9.871393395120774e-05,
"loss": 0.014,
"step": 71
},
{
"epoch": 0.8323699421965318,
"grad_norm": 0.014171602204442024,
"learning_rate": 9.862086385017283e-05,
"loss": 0.0099,
"step": 72
},
{
"epoch": 0.8439306358381503,
"grad_norm": 0.01973879709839821,
"learning_rate": 9.852458970787026e-05,
"loss": 0.011,
"step": 73
},
{
"epoch": 0.8554913294797688,
"grad_norm": 0.015728101134300232,
"learning_rate": 9.842511786862019e-05,
"loss": 0.01,
"step": 74
},
{
"epoch": 0.8670520231213873,
"grad_norm": 0.015097449533641338,
"learning_rate": 9.832245488746611e-05,
"loss": 0.0111,
"step": 75
},
{
"epoch": 0.8670520231213873,
"eval_loss": 0.010938560590147972,
"eval_runtime": 10.0124,
"eval_samples_per_second": 4.994,
"eval_steps_per_second": 1.298,
"step": 75
},
{
"epoch": 0.8786127167630058,
"grad_norm": 0.02237934060394764,
"learning_rate": 9.821660752974293e-05,
"loss": 0.0142,
"step": 76
},
{
"epoch": 0.8901734104046243,
"grad_norm": 0.01948779821395874,
"learning_rate": 9.810758277063119e-05,
"loss": 0.0138,
"step": 77
},
{
"epoch": 0.9017341040462428,
"grad_norm": 0.022487910464406013,
"learning_rate": 9.799538779469734e-05,
"loss": 0.0151,
"step": 78
},
{
"epoch": 0.9132947976878613,
"grad_norm": 0.017546942457556725,
"learning_rate": 9.78800299954203e-05,
"loss": 0.0098,
"step": 79
},
{
"epoch": 0.9248554913294798,
"grad_norm": 0.021010980010032654,
"learning_rate": 9.77615169747043e-05,
"loss": 0.012,
"step": 80
},
{
"epoch": 0.9248554913294798,
"eval_loss": 0.010658971033990383,
"eval_runtime": 10.0307,
"eval_samples_per_second": 4.985,
"eval_steps_per_second": 1.296,
"step": 80
},
{
"epoch": 0.9364161849710982,
"grad_norm": 0.018796751275658607,
"learning_rate": 9.763985654237786e-05,
"loss": 0.0126,
"step": 81
},
{
"epoch": 0.9479768786127167,
"grad_norm": 0.020107241347432137,
"learning_rate": 9.751505671567913e-05,
"loss": 0.0129,
"step": 82
},
{
"epoch": 0.9595375722543352,
"grad_norm": 0.015596112236380577,
"learning_rate": 9.738712571872763e-05,
"loss": 0.0093,
"step": 83
},
{
"epoch": 0.9710982658959537,
"grad_norm": 0.018946580588817596,
"learning_rate": 9.725607198198227e-05,
"loss": 0.0139,
"step": 84
},
{
"epoch": 0.9826589595375722,
"grad_norm": 0.01942116767168045,
"learning_rate": 9.712190414168572e-05,
"loss": 0.0139,
"step": 85
},
{
"epoch": 0.9826589595375722,
"eval_loss": 0.010201876051723957,
"eval_runtime": 10.0325,
"eval_samples_per_second": 4.984,
"eval_steps_per_second": 1.296,
"step": 85
},
{
"epoch": 0.9942196531791907,
"grad_norm": 0.01808648556470871,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0096,
"step": 86
},
{
"epoch": 1.0057803468208093,
"grad_norm": 0.033104732632637024,
"learning_rate": 9.684426172090085e-05,
"loss": 0.0208,
"step": 87
},
{
"epoch": 1.0173410404624277,
"grad_norm": 0.011730443686246872,
"learning_rate": 9.67008054366274e-05,
"loss": 0.0079,
"step": 88
},
{
"epoch": 1.0289017341040463,
"grad_norm": 0.017196012660861015,
"learning_rate": 9.65542716400269e-05,
"loss": 0.0103,
"step": 89
},
{
"epoch": 1.0404624277456647,
"grad_norm": 0.015379775315523148,
"learning_rate": 9.640466998745456e-05,
"loss": 0.0085,
"step": 90
},
{
"epoch": 1.0404624277456647,
"eval_loss": 0.01041076984256506,
"eval_runtime": 10.0354,
"eval_samples_per_second": 4.982,
"eval_steps_per_second": 1.295,
"step": 90
},
{
"epoch": 1.0520231213872833,
"grad_norm": 0.01682409830391407,
"learning_rate": 9.625201033743261e-05,
"loss": 0.0102,
"step": 91
},
{
"epoch": 1.0635838150289016,
"grad_norm": 0.017098382115364075,
"learning_rate": 9.609630275000072e-05,
"loss": 0.0081,
"step": 92
},
{
"epoch": 1.0751445086705202,
"grad_norm": 0.017288707196712494,
"learning_rate": 9.5937557486053e-05,
"loss": 0.0094,
"step": 93
},
{
"epoch": 1.0867052023121386,
"grad_norm": 0.018024342134594917,
"learning_rate": 9.577578500666187e-05,
"loss": 0.0116,
"step": 94
},
{
"epoch": 1.0982658959537572,
"grad_norm": 0.017229463905096054,
"learning_rate": 9.56109959723886e-05,
"loss": 0.01,
"step": 95
},
{
"epoch": 1.0982658959537572,
"eval_loss": 0.010240535251796246,
"eval_runtime": 10.0835,
"eval_samples_per_second": 4.959,
"eval_steps_per_second": 1.289,
"step": 95
},
{
"epoch": 1.1098265895953756,
"grad_norm": 0.015899403020739555,
"learning_rate": 9.544320124258092e-05,
"loss": 0.0099,
"step": 96
},
{
"epoch": 1.1213872832369942,
"grad_norm": 0.019960559904575348,
"learning_rate": 9.527241187465734e-05,
"loss": 0.0098,
"step": 97
},
{
"epoch": 1.1329479768786128,
"grad_norm": 0.01425469946116209,
"learning_rate": 9.509863912337842e-05,
"loss": 0.0084,
"step": 98
},
{
"epoch": 1.1445086705202312,
"grad_norm": 0.015252132900059223,
"learning_rate": 9.492189444010521e-05,
"loss": 0.01,
"step": 99
},
{
"epoch": 1.1560693641618498,
"grad_norm": 0.014408037066459656,
"learning_rate": 9.474218947204459e-05,
"loss": 0.009,
"step": 100
},
{
"epoch": 1.1560693641618498,
"eval_loss": 0.009868398308753967,
"eval_runtime": 10.0137,
"eval_samples_per_second": 4.993,
"eval_steps_per_second": 1.298,
"step": 100
},
{
"epoch": 1.1676300578034682,
"grad_norm": 0.01568921096622944,
"learning_rate": 9.455953606148172e-05,
"loss": 0.0125,
"step": 101
},
{
"epoch": 1.1791907514450868,
"grad_norm": 0.01693912222981453,
"learning_rate": 9.437394624499958e-05,
"loss": 0.0116,
"step": 102
},
{
"epoch": 1.1907514450867052,
"grad_norm": 0.018201276659965515,
"learning_rate": 9.418543225268596e-05,
"loss": 0.0117,
"step": 103
},
{
"epoch": 1.2023121387283238,
"grad_norm": 0.015434252098202705,
"learning_rate": 9.399400650732735e-05,
"loss": 0.011,
"step": 104
},
{
"epoch": 1.2138728323699421,
"grad_norm": 0.01606677658855915,
"learning_rate": 9.379968162359034e-05,
"loss": 0.0094,
"step": 105
},
{
"epoch": 1.2138728323699421,
"eval_loss": 0.009825030341744423,
"eval_runtime": 10.0305,
"eval_samples_per_second": 4.985,
"eval_steps_per_second": 1.296,
"step": 105
},
{
"epoch": 1.2254335260115607,
"grad_norm": 0.015970559790730476,
"learning_rate": 9.360247040719039e-05,
"loss": 0.0079,
"step": 106
},
{
"epoch": 1.2369942196531791,
"grad_norm": 0.01553067285567522,
"learning_rate": 9.340238585404788e-05,
"loss": 0.0077,
"step": 107
},
{
"epoch": 1.2485549132947977,
"grad_norm": 0.017365023493766785,
"learning_rate": 9.319944114943171e-05,
"loss": 0.0089,
"step": 108
},
{
"epoch": 1.260115606936416,
"grad_norm": 0.01562884822487831,
"learning_rate": 9.29936496670905e-05,
"loss": 0.0076,
"step": 109
},
{
"epoch": 1.2716763005780347,
"grad_norm": 0.016722485423088074,
"learning_rate": 9.278502496837116e-05,
"loss": 0.0069,
"step": 110
},
{
"epoch": 1.2716763005780347,
"eval_loss": 0.009917480871081352,
"eval_runtime": 9.9972,
"eval_samples_per_second": 5.001,
"eval_steps_per_second": 1.3,
"step": 110
},
{
"epoch": 1.2832369942196533,
"grad_norm": 0.019191304221749306,
"learning_rate": 9.257358080132523e-05,
"loss": 0.0108,
"step": 111
},
{
"epoch": 1.2947976878612717,
"grad_norm": 0.02037121169269085,
"learning_rate": 9.235933109980301e-05,
"loss": 0.011,
"step": 112
},
{
"epoch": 1.30635838150289,
"grad_norm": 0.016349676996469498,
"learning_rate": 9.214228998253527e-05,
"loss": 0.0084,
"step": 113
},
{
"epoch": 1.3179190751445087,
"grad_norm": 0.01671903021633625,
"learning_rate": 9.192247175220276e-05,
"loss": 0.0087,
"step": 114
},
{
"epoch": 1.3294797687861273,
"grad_norm": 0.01710616424679756,
"learning_rate": 9.16998908944939e-05,
"loss": 0.0108,
"step": 115
},
{
"epoch": 1.3294797687861273,
"eval_loss": 0.009632885456085205,
"eval_runtime": 10.0191,
"eval_samples_per_second": 4.99,
"eval_steps_per_second": 1.298,
"step": 115
},
{
"epoch": 1.3410404624277457,
"grad_norm": 0.01672513224184513,
"learning_rate": 9.147456207714997e-05,
"loss": 0.0077,
"step": 116
},
{
"epoch": 1.352601156069364,
"grad_norm": 0.017153792083263397,
"learning_rate": 9.124650014899867e-05,
"loss": 0.0101,
"step": 117
},
{
"epoch": 1.3641618497109826,
"grad_norm": 0.018158160150051117,
"learning_rate": 9.101572013897555e-05,
"loss": 0.0103,
"step": 118
},
{
"epoch": 1.3757225433526012,
"grad_norm": 0.017670975998044014,
"learning_rate": 9.078223725513366e-05,
"loss": 0.0112,
"step": 119
},
{
"epoch": 1.3872832369942196,
"grad_norm": 0.013258930295705795,
"learning_rate": 9.05460668836413e-05,
"loss": 0.0066,
"step": 120
},
{
"epoch": 1.3872832369942196,
"eval_loss": 0.009479235857725143,
"eval_runtime": 10.0092,
"eval_samples_per_second": 4.995,
"eval_steps_per_second": 1.299,
"step": 120
},
{
"epoch": 1.3988439306358382,
"grad_norm": 0.016367750242352486,
"learning_rate": 9.030722458776814e-05,
"loss": 0.0083,
"step": 121
},
{
"epoch": 1.4104046242774566,
"grad_norm": 0.01966327428817749,
"learning_rate": 9.006572610685968e-05,
"loss": 0.013,
"step": 122
},
{
"epoch": 1.4219653179190752,
"grad_norm": 0.014227735809981823,
"learning_rate": 8.98215873552999e-05,
"loss": 0.0081,
"step": 123
},
{
"epoch": 1.4335260115606936,
"grad_norm": 0.0126969488337636,
"learning_rate": 8.957482442146272e-05,
"loss": 0.0069,
"step": 124
},
{
"epoch": 1.4450867052023122,
"grad_norm": 0.0182335264980793,
"learning_rate": 8.932545356665157e-05,
"loss": 0.0089,
"step": 125
},
{
"epoch": 1.4450867052023122,
"eval_loss": 0.009439178742468357,
"eval_runtime": 10.0231,
"eval_samples_per_second": 4.988,
"eval_steps_per_second": 1.297,
"step": 125
},
{
"epoch": 1.4566473988439306,
"grad_norm": 0.013425913639366627,
"learning_rate": 8.907349122402801e-05,
"loss": 0.0064,
"step": 126
},
{
"epoch": 1.4682080924855492,
"grad_norm": 0.017706867307424545,
"learning_rate": 8.881895399752874e-05,
"loss": 0.0085,
"step": 127
},
{
"epoch": 1.4797687861271676,
"grad_norm": 0.015954311937093735,
"learning_rate": 8.856185866077129e-05,
"loss": 0.0091,
"step": 128
},
{
"epoch": 1.4913294797687862,
"grad_norm": 0.01800154522061348,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0118,
"step": 129
},
{
"epoch": 1.5028901734104045,
"grad_norm": 0.022386785596609116,
"learning_rate": 8.80400615927139e-05,
"loss": 0.0084,
"step": 130
},
{
"epoch": 1.5028901734104045,
"eval_loss": 0.009285805746912956,
"eval_runtime": 10.0195,
"eval_samples_per_second": 4.99,
"eval_steps_per_second": 1.297,
"step": 130
},
{
"epoch": 1.5144508670520231,
"grad_norm": 0.015237067826092243,
"learning_rate": 8.777539424705023e-05,
"loss": 0.0067,
"step": 131
},
{
"epoch": 1.5260115606936417,
"grad_norm": 0.01740083284676075,
"learning_rate": 8.750823756013498e-05,
"loss": 0.0111,
"step": 132
},
{
"epoch": 1.5375722543352601,
"grad_norm": 0.01614670269191265,
"learning_rate": 8.72386091371891e-05,
"loss": 0.0066,
"step": 133
},
{
"epoch": 1.5491329479768785,
"grad_norm": 0.018181053921580315,
"learning_rate": 8.696652674631717e-05,
"loss": 0.0089,
"step": 134
},
{
"epoch": 1.560693641618497,
"grad_norm": 0.01898285746574402,
"learning_rate": 8.669200831733655e-05,
"loss": 0.0102,
"step": 135
},
{
"epoch": 1.560693641618497,
"eval_loss": 0.00925290398299694,
"eval_runtime": 10.0096,
"eval_samples_per_second": 4.995,
"eval_steps_per_second": 1.299,
"step": 135
},
{
"epoch": 1.5722543352601157,
"grad_norm": 0.01583796739578247,
"learning_rate": 8.641507194059579e-05,
"loss": 0.0084,
"step": 136
},
{
"epoch": 1.583815028901734,
"grad_norm": 0.020259637385606766,
"learning_rate": 8.613573586578262e-05,
"loss": 0.0097,
"step": 137
},
{
"epoch": 1.5953757225433525,
"grad_norm": 0.0170609038323164,
"learning_rate": 8.585401850072113e-05,
"loss": 0.0102,
"step": 138
},
{
"epoch": 1.606936416184971,
"grad_norm": 0.013348150067031384,
"learning_rate": 8.55699384101589e-05,
"loss": 0.0076,
"step": 139
},
{
"epoch": 1.6184971098265897,
"grad_norm": 0.0179379191249609,
"learning_rate": 8.528351431454351e-05,
"loss": 0.01,
"step": 140
},
{
"epoch": 1.6184971098265897,
"eval_loss": 0.009090203791856766,
"eval_runtime": 10.0116,
"eval_samples_per_second": 4.994,
"eval_steps_per_second": 1.298,
"step": 140
},
{
"epoch": 1.630057803468208,
"grad_norm": 0.014754540286958218,
"learning_rate": 8.499476508878893e-05,
"loss": 0.0091,
"step": 141
},
{
"epoch": 1.6416184971098264,
"grad_norm": 0.014680921100080013,
"learning_rate": 8.47037097610317e-05,
"loss": 0.008,
"step": 142
},
{
"epoch": 1.653179190751445,
"grad_norm": 0.01806177943944931,
"learning_rate": 8.441036751137696e-05,
"loss": 0.0122,
"step": 143
},
{
"epoch": 1.6647398843930636,
"grad_norm": 0.014901218004524708,
"learning_rate": 8.411475767063455e-05,
"loss": 0.0075,
"step": 144
},
{
"epoch": 1.6763005780346822,
"grad_norm": 0.01642463356256485,
"learning_rate": 8.381689971904514e-05,
"loss": 0.0098,
"step": 145
},
{
"epoch": 1.6763005780346822,
"eval_loss": 0.008807710371911526,
"eval_runtime": 10.0289,
"eval_samples_per_second": 4.986,
"eval_steps_per_second": 1.296,
"step": 145
},
{
"epoch": 1.6878612716763006,
"grad_norm": 0.0145337525755167,
"learning_rate": 8.35168132849965e-05,
"loss": 0.0074,
"step": 146
},
{
"epoch": 1.699421965317919,
"grad_norm": 0.019870057702064514,
"learning_rate": 8.321451814372997e-05,
"loss": 0.0111,
"step": 147
},
{
"epoch": 1.7109826589595376,
"grad_norm": 0.015163514763116837,
"learning_rate": 8.29100342160374e-05,
"loss": 0.0097,
"step": 148
},
{
"epoch": 1.7225433526011562,
"grad_norm": 0.01458723470568657,
"learning_rate": 8.260338156694836e-05,
"loss": 0.007,
"step": 149
},
{
"epoch": 1.7341040462427746,
"grad_norm": 0.015452525578439236,
"learning_rate": 8.229458040440783e-05,
"loss": 0.0071,
"step": 150
},
{
"epoch": 1.7341040462427746,
"eval_loss": 0.008663667365908623,
"eval_runtime": 10.0348,
"eval_samples_per_second": 4.983,
"eval_steps_per_second": 1.295,
"step": 150
},
{
"epoch": 1.745664739884393,
"grad_norm": 0.017789160832762718,
"learning_rate": 8.198365107794457e-05,
"loss": 0.0098,
"step": 151
},
{
"epoch": 1.7572254335260116,
"grad_norm": 0.019878627732396126,
"learning_rate": 8.167061407733016e-05,
"loss": 0.0077,
"step": 152
},
{
"epoch": 1.7687861271676302,
"grad_norm": 0.018271176144480705,
"learning_rate": 8.135549003122871e-05,
"loss": 0.0096,
"step": 153
},
{
"epoch": 1.7803468208092486,
"grad_norm": 0.017175493761897087,
"learning_rate": 8.103829970583742e-05,
"loss": 0.0111,
"step": 154
},
{
"epoch": 1.791907514450867,
"grad_norm": 0.014682111330330372,
"learning_rate": 8.071906400351822e-05,
"loss": 0.0094,
"step": 155
},
{
"epoch": 1.791907514450867,
"eval_loss": 0.008556585758924484,
"eval_runtime": 10.0386,
"eval_samples_per_second": 4.981,
"eval_steps_per_second": 1.295,
"step": 155
},
{
"epoch": 1.8034682080924855,
"grad_norm": 0.016677409410476685,
"learning_rate": 8.039780396142022e-05,
"loss": 0.0079,
"step": 156
},
{
"epoch": 1.8150289017341041,
"grad_norm": 0.01885165646672249,
"learning_rate": 8.007454075009351e-05,
"loss": 0.0115,
"step": 157
},
{
"epoch": 1.8265895953757225,
"grad_norm": 0.01522767636924982,
"learning_rate": 7.9749295672094e-05,
"loss": 0.0076,
"step": 158
},
{
"epoch": 1.838150289017341,
"grad_norm": 0.01811189390718937,
"learning_rate": 7.942209016057954e-05,
"loss": 0.01,
"step": 159
},
{
"epoch": 1.8497109826589595,
"grad_norm": 0.01717502437531948,
"learning_rate": 7.909294577789766e-05,
"loss": 0.008,
"step": 160
},
{
"epoch": 1.8497109826589595,
"eval_loss": 0.008559009060263634,
"eval_runtime": 10.0652,
"eval_samples_per_second": 4.968,
"eval_steps_per_second": 1.292,
"step": 160
},
{
"epoch": 1.861271676300578,
"grad_norm": 0.015464117750525475,
"learning_rate": 7.876188421416449e-05,
"loss": 0.0068,
"step": 161
},
{
"epoch": 1.8728323699421965,
"grad_norm": 0.015217979438602924,
"learning_rate": 7.842892728583558e-05,
"loss": 0.0074,
"step": 162
},
{
"epoch": 1.8843930635838149,
"grad_norm": 0.01554564293473959,
"learning_rate": 7.809409693426803e-05,
"loss": 0.0071,
"step": 163
},
{
"epoch": 1.8959537572254335,
"grad_norm": 0.01914595626294613,
"learning_rate": 7.775741522427477e-05,
"loss": 0.009,
"step": 164
},
{
"epoch": 1.907514450867052,
"grad_norm": 0.01934712752699852,
"learning_rate": 7.741890434267043e-05,
"loss": 0.01,
"step": 165
},
{
"epoch": 1.907514450867052,
"eval_loss": 0.00854802131652832,
"eval_runtime": 10.0226,
"eval_samples_per_second": 4.989,
"eval_steps_per_second": 1.297,
"step": 165
},
{
"epoch": 1.9190751445086707,
"grad_norm": 0.01625540666282177,
"learning_rate": 7.707858659680924e-05,
"loss": 0.0071,
"step": 166
},
{
"epoch": 1.930635838150289,
"grad_norm": 0.020419662818312645,
"learning_rate": 7.673648441311508e-05,
"loss": 0.0076,
"step": 167
},
{
"epoch": 1.9421965317919074,
"grad_norm": 0.012187760323286057,
"learning_rate": 7.639262033560359e-05,
"loss": 0.0064,
"step": 168
},
{
"epoch": 1.953757225433526,
"grad_norm": 0.015659835189580917,
"learning_rate": 7.604701702439651e-05,
"loss": 0.0074,
"step": 169
},
{
"epoch": 1.9653179190751446,
"grad_norm": 0.017916489392518997,
"learning_rate": 7.56996972542285e-05,
"loss": 0.0084,
"step": 170
},
{
"epoch": 1.9653179190751446,
"eval_loss": 0.008561979979276657,
"eval_runtime": 10.0323,
"eval_samples_per_second": 4.984,
"eval_steps_per_second": 1.296,
"step": 170
},
{
"epoch": 1.976878612716763,
"grad_norm": 0.018545281141996384,
"learning_rate": 7.535068391294617e-05,
"loss": 0.0102,
"step": 171
},
{
"epoch": 1.9884393063583814,
"grad_norm": 0.01769687980413437,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0086,
"step": 172
},
{
"epoch": 2.0,
"grad_norm": 0.021417992189526558,
"learning_rate": 7.464766862492855e-05,
"loss": 0.0092,
"step": 173
},
{
"epoch": 2.0115606936416186,
"grad_norm": 0.01275323610752821,
"learning_rate": 7.42937130058357e-05,
"loss": 0.0069,
"step": 174
},
{
"epoch": 2.023121387283237,
"grad_norm": 0.011270470917224884,
"learning_rate": 7.393815646786046e-05,
"loss": 0.0058,
"step": 175
},
{
"epoch": 2.023121387283237,
"eval_loss": 0.008691162802278996,
"eval_runtime": 10.0831,
"eval_samples_per_second": 4.959,
"eval_steps_per_second": 1.289,
"step": 175
},
{
"epoch": 2.0346820809248554,
"grad_norm": 0.014580226503312588,
"learning_rate": 7.358102244164003e-05,
"loss": 0.0077,
"step": 176
},
{
"epoch": 2.046242774566474,
"grad_norm": 0.014212528243660927,
"learning_rate": 7.322233446176571e-05,
"loss": 0.0074,
"step": 177
},
{
"epoch": 2.0578034682080926,
"grad_norm": 0.01766706444323063,
"learning_rate": 7.286211616523193e-05,
"loss": 0.007,
"step": 178
},
{
"epoch": 2.069364161849711,
"grad_norm": 0.015258056111633778,
"learning_rate": 7.250039128987873e-05,
"loss": 0.0073,
"step": 179
},
{
"epoch": 2.0809248554913293,
"grad_norm": 0.01634743995964527,
"learning_rate": 7.213718367282737e-05,
"loss": 0.0056,
"step": 180
},
{
"epoch": 2.0809248554913293,
"eval_loss": 0.008958614431321621,
"eval_runtime": 10.0215,
"eval_samples_per_second": 4.989,
"eval_steps_per_second": 1.297,
"step": 180
},
{
"epoch": 2.092485549132948,
"grad_norm": 0.014233705587685108,
"learning_rate": 7.177251724890956e-05,
"loss": 0.0056,
"step": 181
},
{
"epoch": 2.1040462427745665,
"grad_norm": 0.013312169350683689,
"learning_rate": 7.14064160490902e-05,
"loss": 0.0055,
"step": 182
},
{
"epoch": 2.115606936416185,
"grad_norm": 0.012955575250089169,
"learning_rate": 7.103890419888367e-05,
"loss": 0.0053,
"step": 183
},
{
"epoch": 2.1271676300578033,
"grad_norm": 0.018879901617765427,
"learning_rate": 7.067000591676416e-05,
"loss": 0.0065,
"step": 184
},
{
"epoch": 2.138728323699422,
"grad_norm": 0.01857951283454895,
"learning_rate": 7.029974551256956e-05,
"loss": 0.0077,
"step": 185
},
{
"epoch": 2.138728323699422,
"eval_loss": 0.008626853115856647,
"eval_runtime": 10.0278,
"eval_samples_per_second": 4.986,
"eval_steps_per_second": 1.296,
"step": 185
},
{
"epoch": 2.1502890173410405,
"grad_norm": 0.015720047056674957,
"learning_rate": 6.992814738589957e-05,
"loss": 0.0057,
"step": 186
},
{
"epoch": 2.161849710982659,
"grad_norm": 0.016932690516114235,
"learning_rate": 6.95552360245078e-05,
"loss": 0.0074,
"step": 187
},
{
"epoch": 2.1734104046242773,
"grad_norm": 0.013904884457588196,
"learning_rate": 6.918103600268799e-05,
"loss": 0.0054,
"step": 188
},
{
"epoch": 2.184971098265896,
"grad_norm": 0.015230257995426655,
"learning_rate": 6.880557197965464e-05,
"loss": 0.0053,
"step": 189
},
{
"epoch": 2.1965317919075145,
"grad_norm": 0.017044221982359886,
"learning_rate": 6.842886869791809e-05,
"loss": 0.0061,
"step": 190
},
{
"epoch": 2.1965317919075145,
"eval_loss": 0.00858032051473856,
"eval_runtime": 10.0219,
"eval_samples_per_second": 4.989,
"eval_steps_per_second": 1.297,
"step": 190
},
{
"epoch": 2.208092485549133,
"grad_norm": 0.01593642681837082,
"learning_rate": 6.805095098165389e-05,
"loss": 0.0056,
"step": 191
},
{
"epoch": 2.2196531791907512,
"grad_norm": 0.018296780064702034,
"learning_rate": 6.767184373506697e-05,
"loss": 0.008,
"step": 192
},
{
"epoch": 2.23121387283237,
"grad_norm": 0.02369534969329834,
"learning_rate": 6.729157194075057e-05,
"loss": 0.0116,
"step": 193
},
{
"epoch": 2.2427745664739884,
"grad_norm": 0.020556915551424026,
"learning_rate": 6.691016065803983e-05,
"loss": 0.0078,
"step": 194
},
{
"epoch": 2.254335260115607,
"grad_norm": 0.01714545115828514,
"learning_rate": 6.652763502136043e-05,
"loss": 0.008,
"step": 195
},
{
"epoch": 2.254335260115607,
"eval_loss": 0.008329370059072971,
"eval_runtime": 10.0304,
"eval_samples_per_second": 4.985,
"eval_steps_per_second": 1.296,
"step": 195
},
{
"epoch": 2.2658959537572256,
"grad_norm": 0.013920610770583153,
"learning_rate": 6.614402023857232e-05,
"loss": 0.0057,
"step": 196
},
{
"epoch": 2.277456647398844,
"grad_norm": 0.014315689913928509,
"learning_rate": 6.57593415893085e-05,
"loss": 0.0062,
"step": 197
},
{
"epoch": 2.2890173410404624,
"grad_norm": 0.01451371144503355,
"learning_rate": 6.537362442330916e-05,
"loss": 0.0059,
"step": 198
},
{
"epoch": 2.300578034682081,
"grad_norm": 0.013728981837630272,
"learning_rate": 6.498689415875121e-05,
"loss": 0.0054,
"step": 199
},
{
"epoch": 2.3121387283236996,
"grad_norm": 0.01417378056794405,
"learning_rate": 6.45991762805732e-05,
"loss": 0.0058,
"step": 200
},
{
"epoch": 2.3121387283236996,
"eval_loss": 0.00831348542124033,
"eval_runtime": 10.0504,
"eval_samples_per_second": 4.975,
"eval_steps_per_second": 1.293,
"step": 200
},
{
"epoch": 2.3236994219653178,
"grad_norm": 0.013795462436974049,
"learning_rate": 6.421049633879588e-05,
"loss": 0.005,
"step": 201
},
{
"epoch": 2.3352601156069364,
"grad_norm": 0.01878678984940052,
"learning_rate": 6.382087994683859e-05,
"loss": 0.0075,
"step": 202
},
{
"epoch": 2.346820809248555,
"grad_norm": 0.01882004365324974,
"learning_rate": 6.343035277983127e-05,
"loss": 0.0068,
"step": 203
},
{
"epoch": 2.3583815028901736,
"grad_norm": 0.015659485012292862,
"learning_rate": 6.303894057292261e-05,
"loss": 0.006,
"step": 204
},
{
"epoch": 2.3699421965317917,
"grad_norm": 0.015219546854496002,
"learning_rate": 6.264666911958404e-05,
"loss": 0.0047,
"step": 205
},
{
"epoch": 2.3699421965317917,
"eval_loss": 0.00836299266666174,
"eval_runtime": 10.0293,
"eval_samples_per_second": 4.985,
"eval_steps_per_second": 1.296,
"step": 205
},
{
"epoch": 2.3815028901734103,
"grad_norm": 0.017362525686621666,
"learning_rate": 6.225356426991007e-05,
"loss": 0.0069,
"step": 206
},
{
"epoch": 2.393063583815029,
"grad_norm": 0.014902186580002308,
"learning_rate": 6.185965192891472e-05,
"loss": 0.0055,
"step": 207
},
{
"epoch": 2.4046242774566475,
"grad_norm": 0.016910729929804802,
"learning_rate": 6.146495805482451e-05,
"loss": 0.0062,
"step": 208
},
{
"epoch": 2.416184971098266,
"grad_norm": 0.014512901194393635,
"learning_rate": 6.106950865736777e-05,
"loss": 0.0052,
"step": 209
},
{
"epoch": 2.4277456647398843,
"grad_norm": 0.017721183598041534,
"learning_rate": 6.0673329796060686e-05,
"loss": 0.0066,
"step": 210
},
{
"epoch": 2.4277456647398843,
"eval_loss": 0.008446984924376011,
"eval_runtime": 10.0239,
"eval_samples_per_second": 4.988,
"eval_steps_per_second": 1.297,
"step": 210
},
{
"epoch": 2.439306358381503,
"grad_norm": 0.01806679181754589,
"learning_rate": 6.0276447578490035e-05,
"loss": 0.0063,
"step": 211
},
{
"epoch": 2.4508670520231215,
"grad_norm": 0.01761380210518837,
"learning_rate": 5.987888815859266e-05,
"loss": 0.0058,
"step": 212
},
{
"epoch": 2.4624277456647397,
"grad_norm": 0.015691956505179405,
"learning_rate": 5.9480677734932045e-05,
"loss": 0.0057,
"step": 213
},
{
"epoch": 2.4739884393063583,
"grad_norm": 0.016050070524215698,
"learning_rate": 5.908184254897182e-05,
"loss": 0.0073,
"step": 214
},
{
"epoch": 2.485549132947977,
"grad_norm": 0.013452823273837566,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0055,
"step": 215
},
{
"epoch": 2.485549132947977,
"eval_loss": 0.008248448371887207,
"eval_runtime": 10.0544,
"eval_samples_per_second": 4.973,
"eval_steps_per_second": 1.293,
"step": 215
},
{
"epoch": 2.4971098265895955,
"grad_norm": 0.013310995884239674,
"learning_rate": 5.8282403060129566e-05,
"loss": 0.0064,
"step": 216
},
{
"epoch": 2.508670520231214,
"grad_norm": 0.016307106241583824,
"learning_rate": 5.788185143909868e-05,
"loss": 0.006,
"step": 217
},
{
"epoch": 2.520231213872832,
"grad_norm": 0.014403578825294971,
"learning_rate": 5.7480780415998906e-05,
"loss": 0.0053,
"step": 218
},
{
"epoch": 2.531791907514451,
"grad_norm": 0.015278173610568047,
"learning_rate": 5.707921642080299e-05,
"loss": 0.0067,
"step": 219
},
{
"epoch": 2.5433526011560694,
"grad_norm": 0.01573382504284382,
"learning_rate": 5.66771859159699e-05,
"loss": 0.0056,
"step": 220
},
{
"epoch": 2.5433526011560694,
"eval_loss": 0.008289137855172157,
"eval_runtime": 10.0106,
"eval_samples_per_second": 4.995,
"eval_steps_per_second": 1.299,
"step": 220
},
{
"epoch": 2.5549132947976876,
"grad_norm": 0.014673051424324512,
"learning_rate": 5.6274715394700805e-05,
"loss": 0.0057,
"step": 221
},
{
"epoch": 2.5664739884393066,
"grad_norm": 0.017399994656443596,
"learning_rate": 5.587183137919332e-05,
"loss": 0.0066,
"step": 222
},
{
"epoch": 2.578034682080925,
"grad_norm": 0.015570064075291157,
"learning_rate": 5.546856041889373e-05,
"loss": 0.0062,
"step": 223
},
{
"epoch": 2.5895953757225434,
"grad_norm": 0.012873631902039051,
"learning_rate": 5.5064929088747316e-05,
"loss": 0.0048,
"step": 224
},
{
"epoch": 2.601156069364162,
"grad_norm": 0.01452395599335432,
"learning_rate": 5.46609639874473e-05,
"loss": 0.005,
"step": 225
},
{
"epoch": 2.601156069364162,
"eval_loss": 0.008248119615018368,
"eval_runtime": 10.0518,
"eval_samples_per_second": 4.974,
"eval_steps_per_second": 1.293,
"step": 225
},
{
"epoch": 2.61271676300578,
"grad_norm": 0.015882886946201324,
"learning_rate": 5.4256691735681786e-05,
"loss": 0.006,
"step": 226
},
{
"epoch": 2.6242774566473988,
"grad_norm": 0.01853213645517826,
"learning_rate": 5.385213897437975e-05,
"loss": 0.0072,
"step": 227
},
{
"epoch": 2.6358381502890174,
"grad_norm": 0.01727055385708809,
"learning_rate": 5.344733236295525e-05,
"loss": 0.0056,
"step": 228
},
{
"epoch": 2.647398843930636,
"grad_norm": 0.01715169847011566,
"learning_rate": 5.3042298577550696e-05,
"loss": 0.007,
"step": 229
},
{
"epoch": 2.6589595375722546,
"grad_norm": 0.01657768152654171,
"learning_rate": 5.263706430927895e-05,
"loss": 0.0065,
"step": 230
},
{
"epoch": 2.6589595375722546,
"eval_loss": 0.00817356538027525,
"eval_runtime": 10.0412,
"eval_samples_per_second": 4.979,
"eval_steps_per_second": 1.295,
"step": 230
},
{
"epoch": 2.6705202312138727,
"grad_norm": 0.016935985535383224,
"learning_rate": 5.223165626246432e-05,
"loss": 0.0056,
"step": 231
},
{
"epoch": 2.6820809248554913,
"grad_norm": 0.016983183100819588,
"learning_rate": 5.182610115288295e-05,
"loss": 0.0054,
"step": 232
},
{
"epoch": 2.69364161849711,
"grad_norm": 0.014110002666711807,
"learning_rate": 5.142042570600212e-05,
"loss": 0.0048,
"step": 233
},
{
"epoch": 2.705202312138728,
"grad_norm": 0.015584226697683334,
"learning_rate": 5.1014656655219197e-05,
"loss": 0.0074,
"step": 234
},
{
"epoch": 2.7167630057803467,
"grad_norm": 0.015623894520103931,
"learning_rate": 5.060882074009988e-05,
"loss": 0.0061,
"step": 235
},
{
"epoch": 2.7167630057803467,
"eval_loss": 0.00805756263434887,
"eval_runtime": 10.0337,
"eval_samples_per_second": 4.983,
"eval_steps_per_second": 1.296,
"step": 235
},
{
"epoch": 2.7283236994219653,
"grad_norm": 0.014514243230223656,
"learning_rate": 5.020294470461615e-05,
"loss": 0.005,
"step": 236
},
{
"epoch": 2.739884393063584,
"grad_norm": 0.016889233142137527,
"learning_rate": 4.979705529538385e-05,
"loss": 0.0065,
"step": 237
},
{
"epoch": 2.7514450867052025,
"grad_norm": 0.014339439570903778,
"learning_rate": 4.9391179259900125e-05,
"loss": 0.0047,
"step": 238
},
{
"epoch": 2.7630057803468207,
"grad_norm": 0.019386202096939087,
"learning_rate": 4.8985343344780815e-05,
"loss": 0.0055,
"step": 239
},
{
"epoch": 2.7745664739884393,
"grad_norm": 0.016825564205646515,
"learning_rate": 4.857957429399788e-05,
"loss": 0.0052,
"step": 240
},
{
"epoch": 2.7745664739884393,
"eval_loss": 0.008152703754603863,
"eval_runtime": 10.0237,
"eval_samples_per_second": 4.988,
"eval_steps_per_second": 1.297,
"step": 240
},
{
"epoch": 2.786127167630058,
"grad_norm": 0.018642200157046318,
"learning_rate": 4.817389884711705e-05,
"loss": 0.0054,
"step": 241
},
{
"epoch": 2.7976878612716765,
"grad_norm": 0.017443155869841576,
"learning_rate": 4.776834373753569e-05,
"loss": 0.0066,
"step": 242
},
{
"epoch": 2.809248554913295,
"grad_norm": 0.017855728045105934,
"learning_rate": 4.7362935690721076e-05,
"loss": 0.0064,
"step": 243
},
{
"epoch": 2.820809248554913,
"grad_norm": 0.013038886711001396,
"learning_rate": 4.695770142244931e-05,
"loss": 0.0054,
"step": 244
},
{
"epoch": 2.832369942196532,
"grad_norm": 0.0145301828160882,
"learning_rate": 4.655266763704476e-05,
"loss": 0.0053,
"step": 245
},
{
"epoch": 2.832369942196532,
"eval_loss": 0.008140629157423973,
"eval_runtime": 10.0264,
"eval_samples_per_second": 4.987,
"eval_steps_per_second": 1.297,
"step": 245
},
{
"epoch": 2.8439306358381504,
"grad_norm": 0.015260045416653156,
"learning_rate": 4.614786102562026e-05,
"loss": 0.0048,
"step": 246
},
{
"epoch": 2.8554913294797686,
"grad_norm": 0.014731310307979584,
"learning_rate": 4.574330826431821e-05,
"loss": 0.005,
"step": 247
},
{
"epoch": 2.867052023121387,
"grad_norm": 0.017430312931537628,
"learning_rate": 4.5339036012552716e-05,
"loss": 0.0062,
"step": 248
},
{
"epoch": 2.878612716763006,
"grad_norm": 0.01655244641005993,
"learning_rate": 4.493507091125269e-05,
"loss": 0.0056,
"step": 249
},
{
"epoch": 2.8901734104046244,
"grad_norm": 0.016315851360559464,
"learning_rate": 4.4531439581106295e-05,
"loss": 0.0058,
"step": 250
},
{
"epoch": 2.8901734104046244,
"eval_loss": 0.007892865687608719,
"eval_runtime": 10.0251,
"eval_samples_per_second": 4.988,
"eval_steps_per_second": 1.297,
"step": 250
},
{
"epoch": 2.901734104046243,
"grad_norm": 0.014720520935952663,
"learning_rate": 4.412816862080668e-05,
"loss": 0.0064,
"step": 251
},
{
"epoch": 2.913294797687861,
"grad_norm": 0.015001147985458374,
"learning_rate": 4.37252846052992e-05,
"loss": 0.0057,
"step": 252
},
{
"epoch": 2.9248554913294798,
"grad_norm": 0.015956144779920578,
"learning_rate": 4.332281408403011e-05,
"loss": 0.0056,
"step": 253
},
{
"epoch": 2.9364161849710984,
"grad_norm": 0.016037022694945335,
"learning_rate": 4.292078357919701e-05,
"loss": 0.0059,
"step": 254
},
{
"epoch": 2.9479768786127165,
"grad_norm": 0.014494068920612335,
"learning_rate": 4.25192195840011e-05,
"loss": 0.0052,
"step": 255
},
{
"epoch": 2.9479768786127165,
"eval_loss": 0.007827498018741608,
"eval_runtime": 10.0269,
"eval_samples_per_second": 4.987,
"eval_steps_per_second": 1.297,
"step": 255
},
{
"epoch": 2.959537572254335,
"grad_norm": 0.015506073832511902,
"learning_rate": 4.2118148560901325e-05,
"loss": 0.0066,
"step": 256
},
{
"epoch": 2.9710982658959537,
"grad_norm": 0.01482071541249752,
"learning_rate": 4.171759693987046e-05,
"loss": 0.0062,
"step": 257
},
{
"epoch": 2.9826589595375723,
"grad_norm": 0.0164498183876276,
"learning_rate": 4.131759111665349e-05,
"loss": 0.0056,
"step": 258
},
{
"epoch": 2.994219653179191,
"grad_norm": 0.014011871069669724,
"learning_rate": 4.0918157451028185e-05,
"loss": 0.0053,
"step": 259
},
{
"epoch": 3.005780346820809,
"grad_norm": 0.024348195642232895,
"learning_rate": 4.051932226506797e-05,
"loss": 0.0071,
"step": 260
},
{
"epoch": 3.005780346820809,
"eval_loss": 0.007992882281541824,
"eval_runtime": 10.0301,
"eval_samples_per_second": 4.985,
"eval_steps_per_second": 1.296,
"step": 260
},
{
"epoch": 3.0173410404624277,
"grad_norm": 0.018887817859649658,
"learning_rate": 4.012111184140734e-05,
"loss": 0.0051,
"step": 261
},
{
"epoch": 3.0289017341040463,
"grad_norm": 0.017569325864315033,
"learning_rate": 3.972355242150998e-05,
"loss": 0.0051,
"step": 262
},
{
"epoch": 3.040462427745665,
"grad_norm": 0.013190064579248428,
"learning_rate": 3.932667020393933e-05,
"loss": 0.0039,
"step": 263
},
{
"epoch": 3.052023121387283,
"grad_norm": 0.015570548363029957,
"learning_rate": 3.893049134263224e-05,
"loss": 0.0035,
"step": 264
},
{
"epoch": 3.0635838150289016,
"grad_norm": 0.018783550709486008,
"learning_rate": 3.8535041945175506e-05,
"loss": 0.0051,
"step": 265
},
{
"epoch": 3.0635838150289016,
"eval_loss": 0.008158711716532707,
"eval_runtime": 10.0165,
"eval_samples_per_second": 4.992,
"eval_steps_per_second": 1.298,
"step": 265
},
{
"epoch": 3.0751445086705202,
"grad_norm": 0.01301144901663065,
"learning_rate": 3.814034807108529e-05,
"loss": 0.0034,
"step": 266
},
{
"epoch": 3.086705202312139,
"grad_norm": 0.019954444840550423,
"learning_rate": 3.774643573008995e-05,
"loss": 0.0067,
"step": 267
},
{
"epoch": 3.098265895953757,
"grad_norm": 0.01796858012676239,
"learning_rate": 3.735333088041596e-05,
"loss": 0.0037,
"step": 268
},
{
"epoch": 3.1098265895953756,
"grad_norm": 0.016962885856628418,
"learning_rate": 3.69610594270774e-05,
"loss": 0.0039,
"step": 269
},
{
"epoch": 3.121387283236994,
"grad_norm": 0.017634421586990356,
"learning_rate": 3.656964722016875e-05,
"loss": 0.0033,
"step": 270
},
{
"epoch": 3.121387283236994,
"eval_loss": 0.00860854797065258,
"eval_runtime": 10.0238,
"eval_samples_per_second": 4.988,
"eval_steps_per_second": 1.297,
"step": 270
},
{
"epoch": 3.132947976878613,
"grad_norm": 0.019069235771894455,
"learning_rate": 3.6179120053161416e-05,
"loss": 0.0044,
"step": 271
},
{
"epoch": 3.1445086705202314,
"grad_norm": 0.01804421842098236,
"learning_rate": 3.578950366120414e-05,
"loss": 0.004,
"step": 272
},
{
"epoch": 3.1560693641618496,
"grad_norm": 0.016116416081786156,
"learning_rate": 3.5400823719426815e-05,
"loss": 0.0038,
"step": 273
},
{
"epoch": 3.167630057803468,
"grad_norm": 0.019759787246584892,
"learning_rate": 3.5013105841248795e-05,
"loss": 0.0036,
"step": 274
},
{
"epoch": 3.179190751445087,
"grad_norm": 0.014977723360061646,
"learning_rate": 3.462637557669084e-05,
"loss": 0.004,
"step": 275
},
{
"epoch": 3.179190751445087,
"eval_loss": 0.008404241874814034,
"eval_runtime": 10.0295,
"eval_samples_per_second": 4.985,
"eval_steps_per_second": 1.296,
"step": 275
},
{
"epoch": 3.1907514450867054,
"grad_norm": 0.016791895031929016,
"learning_rate": 3.424065841069152e-05,
"loss": 0.0049,
"step": 276
},
{
"epoch": 3.2023121387283235,
"grad_norm": 0.013895588926970959,
"learning_rate": 3.38559797614277e-05,
"loss": 0.0039,
"step": 277
},
{
"epoch": 3.213872832369942,
"grad_norm": 0.017442645505070686,
"learning_rate": 3.347236497863957e-05,
"loss": 0.0042,
"step": 278
},
{
"epoch": 3.2254335260115607,
"grad_norm": 0.018163053318858147,
"learning_rate": 3.308983934196018e-05,
"loss": 0.0044,
"step": 279
},
{
"epoch": 3.2369942196531793,
"grad_norm": 0.017282122746109962,
"learning_rate": 3.2708428059249436e-05,
"loss": 0.0032,
"step": 280
},
{
"epoch": 3.2369942196531793,
"eval_loss": 0.008181248791515827,
"eval_runtime": 10.0205,
"eval_samples_per_second": 4.99,
"eval_steps_per_second": 1.297,
"step": 280
},
{
"epoch": 3.2485549132947975,
"grad_norm": 0.017854265868663788,
"learning_rate": 3.232815626493304e-05,
"loss": 0.0054,
"step": 281
},
{
"epoch": 3.260115606936416,
"grad_norm": 0.0203102994710207,
"learning_rate": 3.1949049018346126e-05,
"loss": 0.0052,
"step": 282
},
{
"epoch": 3.2716763005780347,
"grad_norm": 0.015286475419998169,
"learning_rate": 3.157113130208191e-05,
"loss": 0.0049,
"step": 283
},
{
"epoch": 3.2832369942196533,
"grad_norm": 0.014563402161002159,
"learning_rate": 3.1194428020345376e-05,
"loss": 0.0039,
"step": 284
},
{
"epoch": 3.294797687861272,
"grad_norm": 0.016170360147953033,
"learning_rate": 3.081896399731202e-05,
"loss": 0.0042,
"step": 285
},
{
"epoch": 3.294797687861272,
"eval_loss": 0.008185806684195995,
"eval_runtime": 10.0071,
"eval_samples_per_second": 4.996,
"eval_steps_per_second": 1.299,
"step": 285
},
{
"epoch": 3.30635838150289,
"grad_norm": 0.017802009359002113,
"learning_rate": 3.0444763975492208e-05,
"loss": 0.0045,
"step": 286
},
{
"epoch": 3.3179190751445087,
"grad_norm": 0.013784883543848991,
"learning_rate": 3.0071852614100426e-05,
"loss": 0.0036,
"step": 287
},
{
"epoch": 3.3294797687861273,
"grad_norm": 0.01664113625884056,
"learning_rate": 2.9700254487430444e-05,
"loss": 0.0044,
"step": 288
},
{
"epoch": 3.3410404624277454,
"grad_norm": 0.01927308365702629,
"learning_rate": 2.9329994083235857e-05,
"loss": 0.0055,
"step": 289
},
{
"epoch": 3.352601156069364,
"grad_norm": 0.01657339371740818,
"learning_rate": 2.896109580111634e-05,
"loss": 0.0035,
"step": 290
},
{
"epoch": 3.352601156069364,
"eval_loss": 0.008166169747710228,
"eval_runtime": 10.0143,
"eval_samples_per_second": 4.993,
"eval_steps_per_second": 1.298,
"step": 290
},
{
"epoch": 3.3641618497109826,
"grad_norm": 0.017788324505090714,
"learning_rate": 2.859358395090983e-05,
"loss": 0.0042,
"step": 291
},
{
"epoch": 3.3757225433526012,
"grad_norm": 0.016261830925941467,
"learning_rate": 2.8227482751090445e-05,
"loss": 0.0043,
"step": 292
},
{
"epoch": 3.38728323699422,
"grad_norm": 0.01472635380923748,
"learning_rate": 2.7862816327172636e-05,
"loss": 0.0037,
"step": 293
},
{
"epoch": 3.398843930635838,
"grad_norm": 0.016420332714915276,
"learning_rate": 2.7499608710121288e-05,
"loss": 0.0042,
"step": 294
},
{
"epoch": 3.4104046242774566,
"grad_norm": 0.016155356541275978,
"learning_rate": 2.7137883834768073e-05,
"loss": 0.0041,
"step": 295
},
{
"epoch": 3.4104046242774566,
"eval_loss": 0.008127645589411259,
"eval_runtime": 10.0238,
"eval_samples_per_second": 4.988,
"eval_steps_per_second": 1.297,
"step": 295
},
{
"epoch": 3.421965317919075,
"grad_norm": 0.016076598316431046,
"learning_rate": 2.6777665538234293e-05,
"loss": 0.004,
"step": 296
},
{
"epoch": 3.433526011560694,
"grad_norm": 0.017685944214463234,
"learning_rate": 2.641897755835997e-05,
"loss": 0.0046,
"step": 297
},
{
"epoch": 3.445086705202312,
"grad_norm": 0.03867075592279434,
"learning_rate": 2.606184353213956e-05,
"loss": 0.0039,
"step": 298
},
{
"epoch": 3.4566473988439306,
"grad_norm": 0.014819984324276447,
"learning_rate": 2.5706286994164315e-05,
"loss": 0.0037,
"step": 299
},
{
"epoch": 3.468208092485549,
"grad_norm": 0.016593076288700104,
"learning_rate": 2.5352331375071437e-05,
"loss": 0.0048,
"step": 300
},
{
"epoch": 3.468208092485549,
"eval_loss": 0.007986278273165226,
"eval_runtime": 10.0089,
"eval_samples_per_second": 4.996,
"eval_steps_per_second": 1.299,
"step": 300
},
{
"epoch": 3.479768786127168,
"grad_norm": 0.015115424059331417,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0034,
"step": 301
},
{
"epoch": 3.491329479768786,
"grad_norm": 0.016738681122660637,
"learning_rate": 2.4649316087053837e-05,
"loss": 0.0041,
"step": 302
},
{
"epoch": 3.5028901734104045,
"grad_norm": 0.014471864327788353,
"learning_rate": 2.4300302745771507e-05,
"loss": 0.0034,
"step": 303
},
{
"epoch": 3.514450867052023,
"grad_norm": 0.015291919000446796,
"learning_rate": 2.3952982975603496e-05,
"loss": 0.0043,
"step": 304
},
{
"epoch": 3.5260115606936417,
"grad_norm": 0.017901018261909485,
"learning_rate": 2.360737966439641e-05,
"loss": 0.0046,
"step": 305
},
{
"epoch": 3.5260115606936417,
"eval_loss": 0.00799076072871685,
"eval_runtime": 10.1032,
"eval_samples_per_second": 4.949,
"eval_steps_per_second": 1.287,
"step": 305
},
{
"epoch": 3.5375722543352603,
"grad_norm": 0.017450664192438126,
"learning_rate": 2.326351558688493e-05,
"loss": 0.0047,
"step": 306
},
{
"epoch": 3.5491329479768785,
"grad_norm": 0.017974600195884705,
"learning_rate": 2.2921413403190772e-05,
"loss": 0.0046,
"step": 307
},
{
"epoch": 3.560693641618497,
"grad_norm": 0.014545281417667866,
"learning_rate": 2.2581095657329598e-05,
"loss": 0.0033,
"step": 308
},
{
"epoch": 3.5722543352601157,
"grad_norm": 0.015802579000592232,
"learning_rate": 2.224258477572524e-05,
"loss": 0.0037,
"step": 309
},
{
"epoch": 3.583815028901734,
"grad_norm": 0.017255526036024094,
"learning_rate": 2.1905903065731974e-05,
"loss": 0.004,
"step": 310
},
{
"epoch": 3.583815028901734,
"eval_loss": 0.007966941222548485,
"eval_runtime": 10.0846,
"eval_samples_per_second": 4.958,
"eval_steps_per_second": 1.289,
"step": 310
},
{
"epoch": 3.5953757225433525,
"grad_norm": 0.014843069948256016,
"learning_rate": 2.1571072714164443e-05,
"loss": 0.0035,
"step": 311
},
{
"epoch": 3.606936416184971,
"grad_norm": 0.01944170519709587,
"learning_rate": 2.123811578583551e-05,
"loss": 0.0056,
"step": 312
},
{
"epoch": 3.6184971098265897,
"grad_norm": 0.017738202586770058,
"learning_rate": 2.090705422210237e-05,
"loss": 0.0043,
"step": 313
},
{
"epoch": 3.6300578034682083,
"grad_norm": 0.018833834677934647,
"learning_rate": 2.057790983942047e-05,
"loss": 0.0049,
"step": 314
},
{
"epoch": 3.6416184971098264,
"grad_norm": 0.013943798840045929,
"learning_rate": 2.0250704327906024e-05,
"loss": 0.0032,
"step": 315
},
{
"epoch": 3.6416184971098264,
"eval_loss": 0.008140211924910545,
"eval_runtime": 10.0297,
"eval_samples_per_second": 4.985,
"eval_steps_per_second": 1.296,
"step": 315
},
{
"epoch": 3.653179190751445,
"grad_norm": 0.015995629131793976,
"learning_rate": 1.9925459249906485e-05,
"loss": 0.0037,
"step": 316
},
{
"epoch": 3.6647398843930636,
"grad_norm": 0.017212923616170883,
"learning_rate": 1.960219603857977e-05,
"loss": 0.0048,
"step": 317
},
{
"epoch": 3.6763005780346822,
"grad_norm": 0.019134720787405968,
"learning_rate": 1.928093599648179e-05,
"loss": 0.0047,
"step": 318
},
{
"epoch": 3.687861271676301,
"grad_norm": 0.016614330932497978,
"learning_rate": 1.8961700294162577e-05,
"loss": 0.0034,
"step": 319
},
{
"epoch": 3.699421965317919,
"grad_norm": 0.017316371202468872,
"learning_rate": 1.86445099687713e-05,
"loss": 0.0039,
"step": 320
},
{
"epoch": 3.699421965317919,
"eval_loss": 0.008361711166799068,
"eval_runtime": 10.0095,
"eval_samples_per_second": 4.995,
"eval_steps_per_second": 1.299,
"step": 320
},
{
"epoch": 3.7109826589595376,
"grad_norm": 0.015041043981909752,
"learning_rate": 1.832938592266984e-05,
"loss": 0.0037,
"step": 321
},
{
"epoch": 3.722543352601156,
"grad_norm": 0.0185141172260046,
"learning_rate": 1.801634892205545e-05,
"loss": 0.0041,
"step": 322
},
{
"epoch": 3.7341040462427744,
"grad_norm": 0.017875155434012413,
"learning_rate": 1.7705419595592193e-05,
"loss": 0.0037,
"step": 323
},
{
"epoch": 3.745664739884393,
"grad_norm": 0.015563211403787136,
"learning_rate": 1.7396618433051647e-05,
"loss": 0.0043,
"step": 324
},
{
"epoch": 3.7572254335260116,
"grad_norm": 0.019554760307073593,
"learning_rate": 1.7089965783962608e-05,
"loss": 0.0042,
"step": 325
},
{
"epoch": 3.7572254335260116,
"eval_loss": 0.008260131813585758,
"eval_runtime": 10.0115,
"eval_samples_per_second": 4.994,
"eval_steps_per_second": 1.299,
"step": 325
},
{
"epoch": 3.76878612716763,
"grad_norm": 0.016527488827705383,
"learning_rate": 1.678548185627004e-05,
"loss": 0.0043,
"step": 326
},
{
"epoch": 3.7803468208092488,
"grad_norm": 0.015184947289526463,
"learning_rate": 1.6483186715003523e-05,
"loss": 0.0035,
"step": 327
},
{
"epoch": 3.791907514450867,
"grad_norm": 0.020948296412825584,
"learning_rate": 1.618310028095486e-05,
"loss": 0.0039,
"step": 328
},
{
"epoch": 3.8034682080924855,
"grad_norm": 0.014377256855368614,
"learning_rate": 1.5885242329365448e-05,
"loss": 0.0033,
"step": 329
},
{
"epoch": 3.815028901734104,
"grad_norm": 0.017349926754832268,
"learning_rate": 1.5589632488623053e-05,
"loss": 0.0046,
"step": 330
},
{
"epoch": 3.815028901734104,
"eval_loss": 0.008042494766414165,
"eval_runtime": 10.1315,
"eval_samples_per_second": 4.935,
"eval_steps_per_second": 1.283,
"step": 330
},
{
"epoch": 3.8265895953757223,
"grad_norm": 0.014084051363170147,
"learning_rate": 1.5296290238968303e-05,
"loss": 0.0032,
"step": 331
},
{
"epoch": 3.838150289017341,
"grad_norm": 0.016601048409938812,
"learning_rate": 1.500523491121108e-05,
"loss": 0.0035,
"step": 332
},
{
"epoch": 3.8497109826589595,
"grad_norm": 0.015788624063134193,
"learning_rate": 1.47164856854565e-05,
"loss": 0.0038,
"step": 333
},
{
"epoch": 3.861271676300578,
"grad_norm": 0.014880317263305187,
"learning_rate": 1.4430061589841121e-05,
"loss": 0.0034,
"step": 334
},
{
"epoch": 3.8728323699421967,
"grad_norm": 0.01920904405415058,
"learning_rate": 1.4145981499278876e-05,
"loss": 0.0035,
"step": 335
},
{
"epoch": 3.8728323699421967,
"eval_loss": 0.008081013336777687,
"eval_runtime": 10.0106,
"eval_samples_per_second": 4.995,
"eval_steps_per_second": 1.299,
"step": 335
},
{
"epoch": 3.884393063583815,
"grad_norm": 0.016501102596521378,
"learning_rate": 1.386426413421738e-05,
"loss": 0.004,
"step": 336
},
{
"epoch": 3.8959537572254335,
"grad_norm": 0.016265930607914925,
"learning_rate": 1.3584928059404205e-05,
"loss": 0.0034,
"step": 337
},
{
"epoch": 3.907514450867052,
"grad_norm": 0.019726134836673737,
"learning_rate": 1.3307991682663462e-05,
"loss": 0.0031,
"step": 338
},
{
"epoch": 3.9190751445086707,
"grad_norm": 0.016232285648584366,
"learning_rate": 1.3033473253682848e-05,
"loss": 0.0034,
"step": 339
},
{
"epoch": 3.9306358381502893,
"grad_norm": 0.02013998292386532,
"learning_rate": 1.2761390862810907e-05,
"loss": 0.0048,
"step": 340
},
{
"epoch": 3.9306358381502893,
"eval_loss": 0.008075674995779991,
"eval_runtime": 10.0299,
"eval_samples_per_second": 4.985,
"eval_steps_per_second": 1.296,
"step": 340
},
{
"epoch": 3.9421965317919074,
"grad_norm": 0.013452830724418163,
"learning_rate": 1.2491762439865035e-05,
"loss": 0.003,
"step": 341
},
{
"epoch": 3.953757225433526,
"grad_norm": 0.012992053292691708,
"learning_rate": 1.2224605752949786e-05,
"loss": 0.0028,
"step": 342
},
{
"epoch": 3.9653179190751446,
"grad_norm": 0.015416258946061134,
"learning_rate": 1.1959938407286097e-05,
"loss": 0.0037,
"step": 343
},
{
"epoch": 3.976878612716763,
"grad_norm": 0.015255059115588665,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0036,
"step": 344
},
{
"epoch": 3.9884393063583814,
"grad_norm": 0.018186306580901146,
"learning_rate": 1.143814133922872e-05,
"loss": 0.0056,
"step": 345
},
{
"epoch": 3.9884393063583814,
"eval_loss": 0.008018155582249165,
"eval_runtime": 10.115,
"eval_samples_per_second": 4.943,
"eval_steps_per_second": 1.285,
"step": 345
},
{
"epoch": 4.0,
"grad_norm": 0.026972023770213127,
"learning_rate": 1.118104600247129e-05,
"loss": 0.0065,
"step": 346
},
{
"epoch": 4.011560693641618,
"grad_norm": 0.01358290296047926,
"learning_rate": 1.0926508775971994e-05,
"loss": 0.0029,
"step": 347
},
{
"epoch": 4.023121387283237,
"grad_norm": 0.011624607257544994,
"learning_rate": 1.0674546433348454e-05,
"loss": 0.0028,
"step": 348
},
{
"epoch": 4.034682080924855,
"grad_norm": 0.012059401720762253,
"learning_rate": 1.0425175578537299e-05,
"loss": 0.0033,
"step": 349
},
{
"epoch": 4.046242774566474,
"grad_norm": 0.011458152905106544,
"learning_rate": 1.0178412644700092e-05,
"loss": 0.0025,
"step": 350
},
{
"epoch": 4.046242774566474,
"eval_loss": 0.008002113550901413,
"eval_runtime": 10.1777,
"eval_samples_per_second": 4.913,
"eval_steps_per_second": 1.277,
"step": 350
},
{
"epoch": 4.057803468208093,
"grad_norm": 0.012259789742529392,
"learning_rate": 9.934273893140334e-06,
"loss": 0.0027,
"step": 351
},
{
"epoch": 4.069364161849711,
"grad_norm": 0.013933391310274601,
"learning_rate": 9.692775412231863e-06,
"loss": 0.0027,
"step": 352
},
{
"epoch": 4.08092485549133,
"grad_norm": 0.012962523847818375,
"learning_rate": 9.453933116358715e-06,
"loss": 0.0024,
"step": 353
},
{
"epoch": 4.092485549132948,
"grad_norm": 0.01309504546225071,
"learning_rate": 9.21776274486636e-06,
"loss": 0.0028,
"step": 354
},
{
"epoch": 4.104046242774566,
"grad_norm": 0.015043212100863457,
"learning_rate": 8.984279861024453e-06,
"loss": 0.0035,
"step": 355
},
{
"epoch": 4.104046242774566,
"eval_loss": 0.008168390020728111,
"eval_runtime": 10.0257,
"eval_samples_per_second": 4.987,
"eval_steps_per_second": 1.297,
"step": 355
},
{
"epoch": 4.115606936416185,
"grad_norm": 0.014944328926503658,
"learning_rate": 8.75349985100134e-06,
"loss": 0.0034,
"step": 356
},
{
"epoch": 4.127167630057803,
"grad_norm": 0.016272595152258873,
"learning_rate": 8.525437922850032e-06,
"loss": 0.0025,
"step": 357
},
{
"epoch": 4.138728323699422,
"grad_norm": 0.013790381141006947,
"learning_rate": 8.30010910550611e-06,
"loss": 0.0025,
"step": 358
},
{
"epoch": 4.1502890173410405,
"grad_norm": 0.013851807452738285,
"learning_rate": 8.077528247797234e-06,
"loss": 0.0029,
"step": 359
},
{
"epoch": 4.161849710982659,
"grad_norm": 0.013270116411149502,
"learning_rate": 7.857710017464737e-06,
"loss": 0.0028,
"step": 360
},
{
"epoch": 4.161849710982659,
"eval_loss": 0.00829649344086647,
"eval_runtime": 10.0,
"eval_samples_per_second": 5.0,
"eval_steps_per_second": 1.3,
"step": 360
},
{
"epoch": 4.173410404624278,
"grad_norm": 0.017382489517331123,
"learning_rate": 7.640668900196984e-06,
"loss": 0.0041,
"step": 361
},
{
"epoch": 4.184971098265896,
"grad_norm": 0.014215102419257164,
"learning_rate": 7.426419198674772e-06,
"loss": 0.0025,
"step": 362
},
{
"epoch": 4.196531791907514,
"grad_norm": 0.01837097853422165,
"learning_rate": 7.214975031628857e-06,
"loss": 0.0036,
"step": 363
},
{
"epoch": 4.208092485549133,
"grad_norm": 0.01614932157099247,
"learning_rate": 7.006350332909495e-06,
"loss": 0.0027,
"step": 364
},
{
"epoch": 4.219653179190751,
"grad_norm": 0.016307765617966652,
"learning_rate": 6.800558850568295e-06,
"loss": 0.0028,
"step": 365
},
{
"epoch": 4.219653179190751,
"eval_loss": 0.008427992463111877,
"eval_runtime": 10.0341,
"eval_samples_per_second": 4.983,
"eval_steps_per_second": 1.296,
"step": 365
},
{
"epoch": 4.23121387283237,
"grad_norm": 0.024037158116698265,
"learning_rate": 6.5976141459521355e-06,
"loss": 0.0026,
"step": 366
},
{
"epoch": 4.242774566473988,
"grad_norm": 0.018489433452486992,
"learning_rate": 6.397529592809614e-06,
"loss": 0.0034,
"step": 367
},
{
"epoch": 4.254335260115607,
"grad_norm": 0.013452818617224693,
"learning_rate": 6.2003183764096695e-06,
"loss": 0.0025,
"step": 368
},
{
"epoch": 4.265895953757226,
"grad_norm": 0.017129331827163696,
"learning_rate": 6.005993492672657e-06,
"loss": 0.0037,
"step": 369
},
{
"epoch": 4.277456647398844,
"grad_norm": 0.01633543334901333,
"learning_rate": 5.814567747314048e-06,
"loss": 0.003,
"step": 370
},
{
"epoch": 4.277456647398844,
"eval_loss": 0.008522397838532925,
"eval_runtime": 10.0223,
"eval_samples_per_second": 4.989,
"eval_steps_per_second": 1.297,
"step": 370
},
{
"epoch": 4.289017341040463,
"grad_norm": 0.013097944669425488,
"learning_rate": 5.626053755000421e-06,
"loss": 0.0021,
"step": 371
},
{
"epoch": 4.300578034682081,
"grad_norm": 0.016527358442544937,
"learning_rate": 5.440463938518303e-06,
"loss": 0.003,
"step": 372
},
{
"epoch": 4.312138728323699,
"grad_norm": 0.015918167307972908,
"learning_rate": 5.257810527955409e-06,
"loss": 0.0027,
"step": 373
},
{
"epoch": 4.323699421965318,
"grad_norm": 0.01487621571868658,
"learning_rate": 5.078105559894791e-06,
"loss": 0.0025,
"step": 374
},
{
"epoch": 4.335260115606936,
"grad_norm": 0.01791987195611,
"learning_rate": 4.901360876621597e-06,
"loss": 0.0033,
"step": 375
},
{
"epoch": 4.335260115606936,
"eval_loss": 0.008536826819181442,
"eval_runtime": 10.0084,
"eval_samples_per_second": 4.996,
"eval_steps_per_second": 1.299,
"step": 375
},
{
"epoch": 4.3468208092485545,
"grad_norm": 0.013367927633225918,
"learning_rate": 4.727588125342669e-06,
"loss": 0.0022,
"step": 376
},
{
"epoch": 4.358381502890174,
"grad_norm": 0.013060873374342918,
"learning_rate": 4.556798757419068e-06,
"loss": 0.0025,
"step": 377
},
{
"epoch": 4.369942196531792,
"grad_norm": 0.014499946497380733,
"learning_rate": 4.389004027611404e-06,
"loss": 0.0027,
"step": 378
},
{
"epoch": 4.381502890173411,
"grad_norm": 0.01943526417016983,
"learning_rate": 4.224214993338149e-06,
"loss": 0.004,
"step": 379
},
{
"epoch": 4.393063583815029,
"grad_norm": 0.019275827333331108,
"learning_rate": 4.062442513947007e-06,
"loss": 0.003,
"step": 380
},
{
"epoch": 4.393063583815029,
"eval_loss": 0.008570068515837193,
"eval_runtime": 10.0111,
"eval_samples_per_second": 4.994,
"eval_steps_per_second": 1.299,
"step": 380
},
{
"epoch": 4.404624277456647,
"grad_norm": 0.01620578207075596,
"learning_rate": 3.903697249999289e-06,
"loss": 0.0031,
"step": 381
},
{
"epoch": 4.416184971098266,
"grad_norm": 0.01429419033229351,
"learning_rate": 3.7479896625674027e-06,
"loss": 0.0026,
"step": 382
},
{
"epoch": 4.427745664739884,
"grad_norm": 0.015918832272291183,
"learning_rate": 3.595330012545445e-06,
"loss": 0.0026,
"step": 383
},
{
"epoch": 4.4393063583815024,
"grad_norm": 0.019388854503631592,
"learning_rate": 3.445728359973094e-06,
"loss": 0.0029,
"step": 384
},
{
"epoch": 4.4508670520231215,
"grad_norm": 0.012549116276204586,
"learning_rate": 3.299194563372604e-06,
"loss": 0.0022,
"step": 385
},
{
"epoch": 4.4508670520231215,
"eval_loss": 0.008573681116104126,
"eval_runtime": 10.0229,
"eval_samples_per_second": 4.989,
"eval_steps_per_second": 1.297,
"step": 385
},
{
"epoch": 4.46242774566474,
"grad_norm": 0.01509170513600111,
"learning_rate": 3.1557382790991687e-06,
"loss": 0.0026,
"step": 386
},
{
"epoch": 4.473988439306359,
"grad_norm": 0.014067728072404861,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.0025,
"step": 387
},
{
"epoch": 4.485549132947977,
"grad_norm": 0.028946029022336006,
"learning_rate": 2.878095858314278e-06,
"loss": 0.0027,
"step": 388
},
{
"epoch": 4.497109826589595,
"grad_norm": 0.01480345893651247,
"learning_rate": 2.743928018017744e-06,
"loss": 0.0025,
"step": 389
},
{
"epoch": 4.508670520231214,
"grad_norm": 0.02504677325487137,
"learning_rate": 2.6128742812723704e-06,
"loss": 0.0028,
"step": 390
},
{
"epoch": 4.508670520231214,
"eval_loss": 0.008553516119718552,
"eval_runtime": 10.0039,
"eval_samples_per_second": 4.998,
"eval_steps_per_second": 1.299,
"step": 390
},
{
"epoch": 4.520231213872832,
"grad_norm": 0.016089266166090965,
"learning_rate": 2.4849432843208785e-06,
"loss": 0.0033,
"step": 391
},
{
"epoch": 4.531791907514451,
"grad_norm": 0.015426091849803925,
"learning_rate": 2.3601434576221546e-06,
"loss": 0.0029,
"step": 392
},
{
"epoch": 4.543352601156069,
"grad_norm": 0.017408043146133423,
"learning_rate": 2.238483025295707e-06,
"loss": 0.0031,
"step": 393
},
{
"epoch": 4.554913294797688,
"grad_norm": 0.017079446464776993,
"learning_rate": 2.1199700045797077e-06,
"loss": 0.0027,
"step": 394
},
{
"epoch": 4.566473988439307,
"grad_norm": 0.016879020258784294,
"learning_rate": 2.0046122053026694e-06,
"loss": 0.0028,
"step": 395
},
{
"epoch": 4.566473988439307,
"eval_loss": 0.008501513861119747,
"eval_runtime": 10.036,
"eval_samples_per_second": 4.982,
"eval_steps_per_second": 1.295,
"step": 395
},
{
"epoch": 4.578034682080925,
"grad_norm": 0.030611367896199226,
"learning_rate": 1.8924172293688147e-06,
"loss": 0.0032,
"step": 396
},
{
"epoch": 4.589595375722544,
"grad_norm": 0.017865322530269623,
"learning_rate": 1.7833924702570725e-06,
"loss": 0.0035,
"step": 397
},
{
"epoch": 4.601156069364162,
"grad_norm": 0.014931568875908852,
"learning_rate": 1.6775451125338959e-06,
"loss": 0.0025,
"step": 398
},
{
"epoch": 4.61271676300578,
"grad_norm": 0.015031742863357067,
"learning_rate": 1.5748821313798124e-06,
"loss": 0.0024,
"step": 399
},
{
"epoch": 4.624277456647399,
"grad_norm": 0.017130030319094658,
"learning_rate": 1.4754102921297364e-06,
"loss": 0.0031,
"step": 400
},
{
"epoch": 4.624277456647399,
"eval_loss": 0.008478561416268349,
"eval_runtime": 10.0323,
"eval_samples_per_second": 4.984,
"eval_steps_per_second": 1.296,
"step": 400
},
{
"epoch": 4.635838150289017,
"grad_norm": 0.014694861136376858,
"learning_rate": 1.3791361498271705e-06,
"loss": 0.0024,
"step": 401
},
{
"epoch": 4.6473988439306355,
"grad_norm": 0.013526085764169693,
"learning_rate": 1.2860660487922616e-06,
"loss": 0.0023,
"step": 402
},
{
"epoch": 4.658959537572255,
"grad_norm": 0.04836466535925865,
"learning_rate": 1.196206122203647e-06,
"loss": 0.0024,
"step": 403
},
{
"epoch": 4.670520231213873,
"grad_norm": 0.016095180064439774,
"learning_rate": 1.1095622916943494e-06,
"loss": 0.0033,
"step": 404
},
{
"epoch": 4.682080924855491,
"grad_norm": 0.019678572192788124,
"learning_rate": 1.0261402669615505e-06,
"loss": 0.0038,
"step": 405
},
{
"epoch": 4.682080924855491,
"eval_loss": 0.00843281950801611,
"eval_runtime": 10.0323,
"eval_samples_per_second": 4.984,
"eval_steps_per_second": 1.296,
"step": 405
},
{
"epoch": 4.69364161849711,
"grad_norm": 0.013817240484058857,
"learning_rate": 9.459455453902866e-07,
"loss": 0.0024,
"step": 406
},
{
"epoch": 4.705202312138728,
"grad_norm": 0.0165092833340168,
"learning_rate": 8.68983411691221e-07,
"loss": 0.0029,
"step": 407
},
{
"epoch": 4.716763005780347,
"grad_norm": 0.016268854960799217,
"learning_rate": 7.952589375523567e-07,
"loss": 0.0028,
"step": 408
},
{
"epoch": 4.728323699421965,
"grad_norm": 0.013906327076256275,
"learning_rate": 7.247769813048644e-07,
"loss": 0.0025,
"step": 409
},
{
"epoch": 4.7398843930635834,
"grad_norm": 0.0136026069521904,
"learning_rate": 6.57542187602872e-07,
"loss": 0.0024,
"step": 410
},
{
"epoch": 4.7398843930635834,
"eval_loss": 0.00842086412012577,
"eval_runtime": 10.0656,
"eval_samples_per_second": 4.967,
"eval_steps_per_second": 1.292,
"step": 410
},
{
"epoch": 4.7514450867052025,
"grad_norm": 0.022275064140558243,
"learning_rate": 5.935589871174208e-07,
"loss": 0.0028,
"step": 411
},
{
"epoch": 4.763005780346821,
"grad_norm": 0.016609348356723785,
"learning_rate": 5.328315962444874e-07,
"loss": 0.0029,
"step": 412
},
{
"epoch": 4.77456647398844,
"grad_norm": 0.015365025959908962,
"learning_rate": 4.753640168271456e-07,
"loss": 0.0029,
"step": 413
},
{
"epoch": 4.786127167630058,
"grad_norm": 0.014568125829100609,
"learning_rate": 4.2116003589179887e-07,
"loss": 0.0024,
"step": 414
},
{
"epoch": 4.797687861271676,
"grad_norm": 0.014286377467215061,
"learning_rate": 3.702232253986804e-07,
"loss": 0.0024,
"step": 415
},
{
"epoch": 4.797687861271676,
"eval_loss": 0.008439392782747746,
"eval_runtime": 10.0373,
"eval_samples_per_second": 4.981,
"eval_steps_per_second": 1.295,
"step": 415
},
{
"epoch": 4.809248554913295,
"grad_norm": 0.015557860024273396,
"learning_rate": 3.2255694200643006e-07,
"loss": 0.0027,
"step": 416
},
{
"epoch": 4.820809248554913,
"grad_norm": 0.01427092682570219,
"learning_rate": 2.78164326850916e-07,
"loss": 0.0023,
"step": 417
},
{
"epoch": 4.832369942196532,
"grad_norm": 0.01711309514939785,
"learning_rate": 2.3704830533821108e-07,
"loss": 0.0031,
"step": 418
},
{
"epoch": 4.84393063583815,
"grad_norm": 0.015366033650934696,
"learning_rate": 1.9921158695184738e-07,
"loss": 0.0026,
"step": 419
},
{
"epoch": 4.855491329479769,
"grad_norm": 0.013661878183484077,
"learning_rate": 1.6465666507425315e-07,
"loss": 0.0024,
"step": 420
},
{
"epoch": 4.855491329479769,
"eval_loss": 0.008424060419201851,
"eval_runtime": 10.0452,
"eval_samples_per_second": 4.978,
"eval_steps_per_second": 1.294,
"step": 420
},
{
"epoch": 4.867052023121388,
"grad_norm": 0.01569373905658722,
"learning_rate": 1.333858168224178e-07,
"loss": 0.0027,
"step": 421
},
{
"epoch": 4.878612716763006,
"grad_norm": 0.02022898755967617,
"learning_rate": 1.0540110289786742e-07,
"loss": 0.0032,
"step": 422
},
{
"epoch": 4.890173410404624,
"grad_norm": 0.013800419867038727,
"learning_rate": 8.07043674508623e-08,
"loss": 0.0024,
"step": 423
},
{
"epoch": 4.901734104046243,
"grad_norm": 0.017323607578873634,
"learning_rate": 5.929723795884967e-08,
"loss": 0.0029,
"step": 424
},
{
"epoch": 4.913294797687861,
"grad_norm": 0.014345725998282433,
"learning_rate": 4.1181125119221787e-08,
"loss": 0.0026,
"step": 425
},
{
"epoch": 4.913294797687861,
"eval_loss": 0.008420780301094055,
"eval_runtime": 10.0258,
"eval_samples_per_second": 4.987,
"eval_steps_per_second": 1.297,
"step": 425
},
{
"epoch": 4.924855491329479,
"grad_norm": 0.014356693252921104,
"learning_rate": 2.6357222756384636e-08,
"loss": 0.0025,
"step": 426
},
{
"epoch": 4.936416184971098,
"grad_norm": 0.014968520030379295,
"learning_rate": 1.482650774303207e-08,
"loss": 0.0028,
"step": 427
},
{
"epoch": 4.9479768786127165,
"grad_norm": 0.02624007686972618,
"learning_rate": 6.589739935819461e-09,
"loss": 0.0033,
"step": 428
},
{
"epoch": 4.959537572254336,
"grad_norm": 0.016497237607836723,
"learning_rate": 1.6474621252704493e-09,
"loss": 0.0031,
"step": 429
},
{
"epoch": 4.971098265895954,
"grad_norm": 0.01508865226060152,
"learning_rate": 0.0,
"loss": 0.0029,
"step": 430
},
{
"epoch": 4.971098265895954,
"eval_loss": 0.008429310284554958,
"eval_runtime": 10.0331,
"eval_samples_per_second": 4.983,
"eval_steps_per_second": 1.296,
"step": 430
},
{
"epoch": 4.971098265895954,
"step": 430,
"total_flos": 1.0207257247425331e+18,
"train_loss": 0.0087554031284526,
"train_runtime": 8412.5192,
"train_samples_per_second": 1.645,
"train_steps_per_second": 0.051
}
],
"logging_steps": 1,
"max_steps": 430,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0207257247425331e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}