{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.964487905301081,
"eval_steps": 200,
"global_step": 180,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016469377251672673,
"grad_norm": 25.988508224487305,
"learning_rate": 1.111111111111111e-06,
"loss": 0.5961,
"step": 1
},
{
"epoch": 0.032938754503345345,
"grad_norm": 26.565134048461914,
"learning_rate": 2.222222222222222e-06,
"loss": 0.595,
"step": 2
},
{
"epoch": 0.049408131755018014,
"grad_norm": 22.700319290161133,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.5211,
"step": 3
},
{
"epoch": 0.06587750900669069,
"grad_norm": 17.019222259521484,
"learning_rate": 4.444444444444444e-06,
"loss": 0.4043,
"step": 4
},
{
"epoch": 0.08234688625836335,
"grad_norm": 15.759174346923828,
"learning_rate": 5.555555555555557e-06,
"loss": 0.3571,
"step": 5
},
{
"epoch": 0.09881626351003603,
"grad_norm": 12.310270309448242,
"learning_rate": 6.666666666666667e-06,
"loss": 0.351,
"step": 6
},
{
"epoch": 0.1152856407617087,
"grad_norm": 6.430344104766846,
"learning_rate": 7.77777777777778e-06,
"loss": 0.3183,
"step": 7
},
{
"epoch": 0.13175501801338138,
"grad_norm": 29.118850708007812,
"learning_rate": 8.888888888888888e-06,
"loss": 0.2986,
"step": 8
},
{
"epoch": 0.14822439526505404,
"grad_norm": 5.387965202331543,
"learning_rate": 1e-05,
"loss": 0.3031,
"step": 9
},
{
"epoch": 0.1646937725167267,
"grad_norm": 4.416501998901367,
"learning_rate": 1.1111111111111113e-05,
"loss": 0.2769,
"step": 10
},
{
"epoch": 0.1811631497683994,
"grad_norm": 3.6543190479278564,
"learning_rate": 1.2222222222222224e-05,
"loss": 0.2596,
"step": 11
},
{
"epoch": 0.19763252702007206,
"grad_norm": 3.7170989513397217,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.2532,
"step": 12
},
{
"epoch": 0.21410190427174472,
"grad_norm": 2.963078498840332,
"learning_rate": 1.4444444444444446e-05,
"loss": 0.241,
"step": 13
},
{
"epoch": 0.2305712815234174,
"grad_norm": 3.8106493949890137,
"learning_rate": 1.555555555555556e-05,
"loss": 0.2355,
"step": 14
},
{
"epoch": 0.24704065877509007,
"grad_norm": 3.3100316524505615,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.227,
"step": 15
},
{
"epoch": 0.26351003602676276,
"grad_norm": 3.8181209564208984,
"learning_rate": 1.7777777777777777e-05,
"loss": 0.222,
"step": 16
},
{
"epoch": 0.2799794132784354,
"grad_norm": 3.1601641178131104,
"learning_rate": 1.888888888888889e-05,
"loss": 0.216,
"step": 17
},
{
"epoch": 0.2964487905301081,
"grad_norm": 3.8540680408477783,
"learning_rate": 2e-05,
"loss": 0.2219,
"step": 18
},
{
"epoch": 0.3129181677817808,
"grad_norm": 2.4376087188720703,
"learning_rate": 1.9998119704485016e-05,
"loss": 0.2124,
"step": 19
},
{
"epoch": 0.3293875450334534,
"grad_norm": 2.0443239212036133,
"learning_rate": 1.9992479525042305e-05,
"loss": 0.2072,
"step": 20
},
{
"epoch": 0.3458569222851261,
"grad_norm": 2.7866272926330566,
"learning_rate": 1.9983081582712684e-05,
"loss": 0.2023,
"step": 21
},
{
"epoch": 0.3623262995367988,
"grad_norm": 2.3178961277008057,
"learning_rate": 1.996992941167792e-05,
"loss": 0.2013,
"step": 22
},
{
"epoch": 0.3787956767884714,
"grad_norm": 2.5984015464782715,
"learning_rate": 1.9953027957931658e-05,
"loss": 0.1933,
"step": 23
},
{
"epoch": 0.3952650540401441,
"grad_norm": 1.8709274530410767,
"learning_rate": 1.9932383577419432e-05,
"loss": 0.1945,
"step": 24
},
{
"epoch": 0.4117344312918168,
"grad_norm": 2.1134493350982666,
"learning_rate": 1.9908004033648452e-05,
"loss": 0.1916,
"step": 25
},
{
"epoch": 0.42820380854348944,
"grad_norm": 2.076616048812866,
"learning_rate": 1.9879898494768093e-05,
"loss": 0.1902,
"step": 26
},
{
"epoch": 0.44467318579516213,
"grad_norm": 2.05837082862854,
"learning_rate": 1.9848077530122083e-05,
"loss": 0.1849,
"step": 27
},
{
"epoch": 0.4611425630468348,
"grad_norm": 1.7282419204711914,
"learning_rate": 1.9812553106273848e-05,
"loss": 0.1843,
"step": 28
},
{
"epoch": 0.47761194029850745,
"grad_norm": 2.2622146606445312,
"learning_rate": 1.9773338582506357e-05,
"loss": 0.1872,
"step": 29
},
{
"epoch": 0.49408131755018014,
"grad_norm": 1.5245953798294067,
"learning_rate": 1.973044870579824e-05,
"loss": 0.182,
"step": 30
},
{
"epoch": 0.5105506948018528,
"grad_norm": 1.590587854385376,
"learning_rate": 1.9683899605278062e-05,
"loss": 0.1767,
"step": 31
},
{
"epoch": 0.5270200720535255,
"grad_norm": 1.6935125589370728,
"learning_rate": 1.9633708786158803e-05,
"loss": 0.1739,
"step": 32
},
{
"epoch": 0.5434894493051982,
"grad_norm": 1.6206973791122437,
"learning_rate": 1.957989512315489e-05,
"loss": 0.1771,
"step": 33
},
{
"epoch": 0.5599588265568708,
"grad_norm": 1.561724066734314,
"learning_rate": 1.9522478853384154e-05,
"loss": 0.1706,
"step": 34
},
{
"epoch": 0.5764282038085435,
"grad_norm": 1.4770928621292114,
"learning_rate": 1.946148156875751e-05,
"loss": 0.1711,
"step": 35
},
{
"epoch": 0.5928975810602162,
"grad_norm": 1.5764074325561523,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.1756,
"step": 36
},
{
"epoch": 0.6093669583118888,
"grad_norm": 2.4715301990509033,
"learning_rate": 1.932883704732001e-05,
"loss": 0.1757,
"step": 37
},
{
"epoch": 0.6258363355635616,
"grad_norm": 1.3919658660888672,
"learning_rate": 1.9257239692688907e-05,
"loss": 0.1658,
"step": 38
},
{
"epoch": 0.6423057128152342,
"grad_norm": 2.575564384460449,
"learning_rate": 1.9182161068802742e-05,
"loss": 0.1768,
"step": 39
},
{
"epoch": 0.6587750900669068,
"grad_norm": 1.6217772960662842,
"learning_rate": 1.9103629409661468e-05,
"loss": 0.1695,
"step": 40
},
{
"epoch": 0.6752444673185796,
"grad_norm": 2.0268895626068115,
"learning_rate": 1.902167424781038e-05,
"loss": 0.1749,
"step": 41
},
{
"epoch": 0.6917138445702522,
"grad_norm": 1.6548528671264648,
"learning_rate": 1.8936326403234125e-05,
"loss": 0.1723,
"step": 42
},
{
"epoch": 0.7081832218219248,
"grad_norm": 1.6716668605804443,
"learning_rate": 1.8847617971766577e-05,
"loss": 0.1724,
"step": 43
},
{
"epoch": 0.7246525990735976,
"grad_norm": 1.6575418710708618,
"learning_rate": 1.8755582313020912e-05,
"loss": 0.1701,
"step": 44
},
{
"epoch": 0.7411219763252702,
"grad_norm": 1.429405689239502,
"learning_rate": 1.866025403784439e-05,
"loss": 0.1665,
"step": 45
},
{
"epoch": 0.7575913535769428,
"grad_norm": 1.5980753898620605,
"learning_rate": 1.8561668995302668e-05,
"loss": 0.1677,
"step": 46
},
{
"epoch": 0.7740607308286156,
"grad_norm": 1.3052290678024292,
"learning_rate": 1.845986425919841e-05,
"loss": 0.1674,
"step": 47
},
{
"epoch": 0.7905301080802882,
"grad_norm": 1.3834303617477417,
"learning_rate": 1.8354878114129368e-05,
"loss": 0.1621,
"step": 48
},
{
"epoch": 0.8069994853319609,
"grad_norm": 1.314771294593811,
"learning_rate": 1.824675004109107e-05,
"loss": 0.1603,
"step": 49
},
{
"epoch": 0.8234688625836336,
"grad_norm": 1.255486249923706,
"learning_rate": 1.8135520702629677e-05,
"loss": 0.1602,
"step": 50
},
{
"epoch": 0.8399382398353062,
"grad_norm": 1.1701654195785522,
"learning_rate": 1.802123192755044e-05,
"loss": 0.1595,
"step": 51
},
{
"epoch": 0.8564076170869789,
"grad_norm": 1.1197280883789062,
"learning_rate": 1.7903926695187595e-05,
"loss": 0.1589,
"step": 52
},
{
"epoch": 0.8728769943386516,
"grad_norm": 1.26057767868042,
"learning_rate": 1.7783649119241603e-05,
"loss": 0.1627,
"step": 53
},
{
"epoch": 0.8893463715903243,
"grad_norm": 1.1180353164672852,
"learning_rate": 1.766044443118978e-05,
"loss": 0.1577,
"step": 54
},
{
"epoch": 0.9058157488419969,
"grad_norm": 1.1337931156158447,
"learning_rate": 1.7534358963276606e-05,
"loss": 0.1563,
"step": 55
},
{
"epoch": 0.9222851260936696,
"grad_norm": 1.1284104585647583,
"learning_rate": 1.740544013109005e-05,
"loss": 0.1552,
"step": 56
},
{
"epoch": 0.9387545033453423,
"grad_norm": 1.1995372772216797,
"learning_rate": 1.7273736415730488e-05,
"loss": 0.1571,
"step": 57
},
{
"epoch": 0.9552238805970149,
"grad_norm": 1.189568042755127,
"learning_rate": 1.7139297345578992e-05,
"loss": 0.1543,
"step": 58
},
{
"epoch": 0.9716932578486875,
"grad_norm": 1.1965142488479614,
"learning_rate": 1.7002173477671685e-05,
"loss": 0.1601,
"step": 59
},
{
"epoch": 0.9881626351003603,
"grad_norm": 1.1370137929916382,
"learning_rate": 1.686241637868734e-05,
"loss": 0.1554,
"step": 60
},
{
"epoch": 1.004632012352033,
"grad_norm": 1.208176612854004,
"learning_rate": 1.6720078605555227e-05,
"loss": 0.1458,
"step": 61
},
{
"epoch": 1.0211013896037056,
"grad_norm": 1.1520111560821533,
"learning_rate": 1.657521368569064e-05,
"loss": 0.1315,
"step": 62
},
{
"epoch": 1.0375707668553782,
"grad_norm": 1.1699644327163696,
"learning_rate": 1.6427876096865394e-05,
"loss": 0.1289,
"step": 63
},
{
"epoch": 1.054040144107051,
"grad_norm": 1.2921233177185059,
"learning_rate": 1.627812124672099e-05,
"loss": 0.1292,
"step": 64
},
{
"epoch": 1.0705095213587237,
"grad_norm": 1.1183018684387207,
"learning_rate": 1.6126005451932028e-05,
"loss": 0.1301,
"step": 65
},
{
"epoch": 1.0869788986103963,
"grad_norm": 1.132805347442627,
"learning_rate": 1.5971585917027864e-05,
"loss": 0.1291,
"step": 66
},
{
"epoch": 1.103448275862069,
"grad_norm": 1.1662507057189941,
"learning_rate": 1.5814920712880267e-05,
"loss": 0.1296,
"step": 67
},
{
"epoch": 1.1199176531137416,
"grad_norm": 1.230918288230896,
"learning_rate": 1.5656068754865388e-05,
"loss": 0.1283,
"step": 68
},
{
"epoch": 1.1363870303654142,
"grad_norm": 1.111428141593933,
"learning_rate": 1.5495089780708062e-05,
"loss": 0.1264,
"step": 69
},
{
"epoch": 1.152856407617087,
"grad_norm": 1.118282437324524,
"learning_rate": 1.5332044328016916e-05,
"loss": 0.1326,
"step": 70
},
{
"epoch": 1.1693257848687597,
"grad_norm": 1.0411953926086426,
"learning_rate": 1.5166993711518631e-05,
"loss": 0.1273,
"step": 71
},
{
"epoch": 1.1857951621204323,
"grad_norm": 1.0174416303634644,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.1236,
"step": 72
},
{
"epoch": 1.202264539372105,
"grad_norm": 1.1264220476150513,
"learning_rate": 1.4831125992966386e-05,
"loss": 0.1262,
"step": 73
},
{
"epoch": 1.2187339166237776,
"grad_norm": 1.1279516220092773,
"learning_rate": 1.4660435197025391e-05,
"loss": 0.1259,
"step": 74
},
{
"epoch": 1.2352032938754502,
"grad_norm": 0.9918281435966492,
"learning_rate": 1.4487991802004625e-05,
"loss": 0.1263,
"step": 75
},
{
"epoch": 1.2516726711271229,
"grad_norm": 1.0115736722946167,
"learning_rate": 1.4313860656812537e-05,
"loss": 0.1281,
"step": 76
},
{
"epoch": 1.2681420483787957,
"grad_norm": 0.993411123752594,
"learning_rate": 1.4138107245051394e-05,
"loss": 0.1287,
"step": 77
},
{
"epoch": 1.2846114256304684,
"grad_norm": 1.0473469495773315,
"learning_rate": 1.396079766039157e-05,
"loss": 0.1272,
"step": 78
},
{
"epoch": 1.301080802882141,
"grad_norm": 0.9457677006721497,
"learning_rate": 1.3781998581716427e-05,
"loss": 0.125,
"step": 79
},
{
"epoch": 1.3175501801338136,
"grad_norm": 0.9976021647453308,
"learning_rate": 1.3601777248047105e-05,
"loss": 0.1256,
"step": 80
},
{
"epoch": 1.3340195573854863,
"grad_norm": 1.0927587747573853,
"learning_rate": 1.342020143325669e-05,
"loss": 0.1265,
"step": 81
},
{
"epoch": 1.3504889346371591,
"grad_norm": 1.0087074041366577,
"learning_rate": 1.3237339420583213e-05,
"loss": 0.1252,
"step": 82
},
{
"epoch": 1.3669583118888318,
"grad_norm": 1.046044945716858,
"learning_rate": 1.3053259976951134e-05,
"loss": 0.1247,
"step": 83
},
{
"epoch": 1.3834276891405044,
"grad_norm": 1.0403574705123901,
"learning_rate": 1.2868032327110904e-05,
"loss": 0.1254,
"step": 84
},
{
"epoch": 1.399897066392177,
"grad_norm": 0.9709586501121521,
"learning_rate": 1.2681726127606374e-05,
"loss": 0.1295,
"step": 85
},
{
"epoch": 1.4163664436438497,
"grad_norm": 0.9234594106674194,
"learning_rate": 1.2494411440579814e-05,
"loss": 0.1226,
"step": 86
},
{
"epoch": 1.4328358208955223,
"grad_norm": 1.01310133934021,
"learning_rate": 1.2306158707424402e-05,
"loss": 0.1243,
"step": 87
},
{
"epoch": 1.449305198147195,
"grad_norm": 0.977340579032898,
"learning_rate": 1.211703872229411e-05,
"loss": 0.1252,
"step": 88
},
{
"epoch": 1.4657745753988678,
"grad_norm": 0.9476578235626221,
"learning_rate": 1.1927122605480899e-05,
"loss": 0.1235,
"step": 89
},
{
"epoch": 1.4822439526505404,
"grad_norm": 0.9919003248214722,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.1238,
"step": 90
},
{
"epoch": 1.498713329902213,
"grad_norm": 0.9731943011283875,
"learning_rate": 1.1545187928078407e-05,
"loss": 0.1269,
"step": 91
},
{
"epoch": 1.5151827071538857,
"grad_norm": 0.9613791108131409,
"learning_rate": 1.1353312997501313e-05,
"loss": 0.1278,
"step": 92
},
{
"epoch": 1.5316520844055583,
"grad_norm": 0.9685577750205994,
"learning_rate": 1.1160929141252303e-05,
"loss": 0.1255,
"step": 93
},
{
"epoch": 1.5481214616572312,
"grad_norm": 0.9065141081809998,
"learning_rate": 1.0968108707031792e-05,
"loss": 0.1229,
"step": 94
},
{
"epoch": 1.5645908389089036,
"grad_norm": 0.9448639750480652,
"learning_rate": 1.077492420671931e-05,
"loss": 0.1228,
"step": 95
},
{
"epoch": 1.5810602161605765,
"grad_norm": 0.91968834400177,
"learning_rate": 1.0581448289104759e-05,
"loss": 0.1238,
"step": 96
},
{
"epoch": 1.597529593412249,
"grad_norm": 0.9806727170944214,
"learning_rate": 1.038775371256817e-05,
"loss": 0.1256,
"step": 97
},
{
"epoch": 1.6139989706639217,
"grad_norm": 0.9167839884757996,
"learning_rate": 1.0193913317718245e-05,
"loss": 0.1249,
"step": 98
},
{
"epoch": 1.6304683479155946,
"grad_norm": 0.9122770428657532,
"learning_rate": 1e-05,
"loss": 0.1254,
"step": 99
},
{
"epoch": 1.646937725167267,
"grad_norm": 0.9491678476333618,
"learning_rate": 9.806086682281759e-06,
"loss": 0.1216,
"step": 100
},
{
"epoch": 1.6634071024189399,
"grad_norm": 0.8831928968429565,
"learning_rate": 9.612246287431832e-06,
"loss": 0.1212,
"step": 101
},
{
"epoch": 1.6798764796706125,
"grad_norm": 0.8818361759185791,
"learning_rate": 9.418551710895243e-06,
"loss": 0.1206,
"step": 102
},
{
"epoch": 1.6963458569222851,
"grad_norm": 0.9016006588935852,
"learning_rate": 9.225075793280693e-06,
"loss": 0.1218,
"step": 103
},
{
"epoch": 1.7128152341739578,
"grad_norm": 0.9079029560089111,
"learning_rate": 9.03189129296821e-06,
"loss": 0.1198,
"step": 104
},
{
"epoch": 1.7292846114256304,
"grad_norm": 0.9520952105522156,
"learning_rate": 8.839070858747697e-06,
"loss": 0.123,
"step": 105
},
{
"epoch": 1.7457539886773032,
"grad_norm": 0.9180439114570618,
"learning_rate": 8.646687002498692e-06,
"loss": 0.1213,
"step": 106
},
{
"epoch": 1.7622233659289757,
"grad_norm": 0.8695424199104309,
"learning_rate": 8.454812071921597e-06,
"loss": 0.1205,
"step": 107
},
{
"epoch": 1.7786927431806485,
"grad_norm": 0.8801153302192688,
"learning_rate": 8.263518223330698e-06,
"loss": 0.1169,
"step": 108
},
{
"epoch": 1.7951621204323212,
"grad_norm": 0.8642145395278931,
"learning_rate": 8.072877394519103e-06,
"loss": 0.1176,
"step": 109
},
{
"epoch": 1.8116314976839938,
"grad_norm": 0.8979960680007935,
"learning_rate": 7.882961277705897e-06,
"loss": 0.1204,
"step": 110
},
{
"epoch": 1.8281008749356666,
"grad_norm": 0.820036768913269,
"learning_rate": 7.6938412925756e-06,
"loss": 0.1213,
"step": 111
},
{
"epoch": 1.844570252187339,
"grad_norm": 0.8929469585418701,
"learning_rate": 7.505588559420188e-06,
"loss": 0.1186,
"step": 112
},
{
"epoch": 1.861039629439012,
"grad_norm": 0.8560171127319336,
"learning_rate": 7.3182738723936255e-06,
"loss": 0.1207,
"step": 113
},
{
"epoch": 1.8775090066906845,
"grad_norm": 0.8577896952629089,
"learning_rate": 7.131967672889101e-06,
"loss": 0.119,
"step": 114
},
{
"epoch": 1.8939783839423572,
"grad_norm": 0.8649168610572815,
"learning_rate": 6.94674002304887e-06,
"loss": 0.1171,
"step": 115
},
{
"epoch": 1.9104477611940298,
"grad_norm": 0.8647825121879578,
"learning_rate": 6.762660579416791e-06,
"loss": 0.119,
"step": 116
},
{
"epoch": 1.9269171384457024,
"grad_norm": 0.8913580775260925,
"learning_rate": 6.579798566743314e-06,
"loss": 0.1178,
"step": 117
},
{
"epoch": 1.9433865156973753,
"grad_norm": 0.8711913824081421,
"learning_rate": 6.3982227519528986e-06,
"loss": 0.1177,
"step": 118
},
{
"epoch": 1.9598558929490477,
"grad_norm": 0.887152910232544,
"learning_rate": 6.218001418283577e-06,
"loss": 0.1208,
"step": 119
},
{
"epoch": 1.9763252702007206,
"grad_norm": 0.9313440918922424,
"learning_rate": 6.039202339608432e-06,
"loss": 0.1174,
"step": 120
},
{
"epoch": 1.9927946474523932,
"grad_norm": 0.971422553062439,
"learning_rate": 5.8618927549486095e-06,
"loss": 0.1205,
"step": 121
},
{
"epoch": 2.009264024704066,
"grad_norm": 0.9346736669540405,
"learning_rate": 5.686139343187468e-06,
"loss": 0.1045,
"step": 122
},
{
"epoch": 2.0257334019557387,
"grad_norm": 0.9971660375595093,
"learning_rate": 5.512008197995379e-06,
"loss": 0.0927,
"step": 123
},
{
"epoch": 2.042202779207411,
"grad_norm": 0.8701250553131104,
"learning_rate": 5.339564802974615e-06,
"loss": 0.0917,
"step": 124
},
{
"epoch": 2.058672156459084,
"grad_norm": 0.9550381898880005,
"learning_rate": 5.168874007033615e-06,
"loss": 0.089,
"step": 125
},
{
"epoch": 2.0751415337107564,
"grad_norm": 0.985183835029602,
"learning_rate": 5.000000000000003e-06,
"loss": 0.0878,
"step": 126
},
{
"epoch": 2.0916109109624292,
"grad_norm": 1.021644949913025,
"learning_rate": 4.8330062884813714e-06,
"loss": 0.0877,
"step": 127
},
{
"epoch": 2.108080288214102,
"grad_norm": 0.967758297920227,
"learning_rate": 4.66795567198309e-06,
"loss": 0.0888,
"step": 128
},
{
"epoch": 2.1245496654657745,
"grad_norm": 0.9479052424430847,
"learning_rate": 4.504910219291941e-06,
"loss": 0.0893,
"step": 129
},
{
"epoch": 2.1410190427174474,
"grad_norm": 0.8988873362541199,
"learning_rate": 4.343931245134616e-06,
"loss": 0.0885,
"step": 130
},
{
"epoch": 2.1574884199691198,
"grad_norm": 0.8895907402038574,
"learning_rate": 4.185079287119733e-06,
"loss": 0.0875,
"step": 131
},
{
"epoch": 2.1739577972207926,
"grad_norm": 0.8918712139129639,
"learning_rate": 4.028414082972141e-06,
"loss": 0.0874,
"step": 132
},
{
"epoch": 2.1904271744724655,
"grad_norm": 0.8613003492355347,
"learning_rate": 3.873994548067972e-06,
"loss": 0.0864,
"step": 133
},
{
"epoch": 2.206896551724138,
"grad_norm": 0.8328737020492554,
"learning_rate": 3.7218787532790167e-06,
"loss": 0.0869,
"step": 134
},
{
"epoch": 2.2233659289758108,
"grad_norm": 0.8785966038703918,
"learning_rate": 3.5721239031346067e-06,
"loss": 0.0866,
"step": 135
},
{
"epoch": 2.239835306227483,
"grad_norm": 0.9229673743247986,
"learning_rate": 3.424786314309365e-06,
"loss": 0.0865,
"step": 136
},
{
"epoch": 2.256304683479156,
"grad_norm": 0.9559910893440247,
"learning_rate": 3.279921394444776e-06,
"loss": 0.0865,
"step": 137
},
{
"epoch": 2.2727740607308284,
"grad_norm": 0.9126923680305481,
"learning_rate": 3.1375836213126653e-06,
"loss": 0.0853,
"step": 138
},
{
"epoch": 2.2892434379825013,
"grad_norm": 0.8650088310241699,
"learning_rate": 2.9978265223283152e-06,
"loss": 0.0859,
"step": 139
},
{
"epoch": 2.305712815234174,
"grad_norm": 0.8619425296783447,
"learning_rate": 2.8607026544210115e-06,
"loss": 0.0854,
"step": 140
},
{
"epoch": 2.3221821924858466,
"grad_norm": 0.930046558380127,
"learning_rate": 2.726263584269513e-06,
"loss": 0.088,
"step": 141
},
{
"epoch": 2.3386515697375194,
"grad_norm": 0.8604719638824463,
"learning_rate": 2.594559868909956e-06,
"loss": 0.0853,
"step": 142
},
{
"epoch": 2.355120946989192,
"grad_norm": 0.8624026775360107,
"learning_rate": 2.4656410367233928e-06,
"loss": 0.0856,
"step": 143
},
{
"epoch": 2.3715903242408647,
"grad_norm": 0.8655623197555542,
"learning_rate": 2.339555568810221e-06,
"loss": 0.087,
"step": 144
},
{
"epoch": 2.388059701492537,
"grad_norm": 0.8891249895095825,
"learning_rate": 2.2163508807584e-06,
"loss": 0.0875,
"step": 145
},
{
"epoch": 2.40452907874421,
"grad_norm": 0.8799765706062317,
"learning_rate": 2.0960733048124082e-06,
"loss": 0.0861,
"step": 146
},
{
"epoch": 2.420998455995883,
"grad_norm": 0.8611019849777222,
"learning_rate": 1.9787680724495617e-06,
"loss": 0.0849,
"step": 147
},
{
"epoch": 2.4374678332475552,
"grad_norm": 0.8410822749137878,
"learning_rate": 1.8644792973703252e-06,
"loss": 0.085,
"step": 148
},
{
"epoch": 2.453937210499228,
"grad_norm": 0.9146021604537964,
"learning_rate": 1.7532499589089324e-06,
"loss": 0.0852,
"step": 149
},
{
"epoch": 2.4704065877509005,
"grad_norm": 0.875053346157074,
"learning_rate": 1.6451218858706374e-06,
"loss": 0.0852,
"step": 150
},
{
"epoch": 2.4868759650025734,
"grad_norm": 0.8440834283828735,
"learning_rate": 1.5401357408015893e-06,
"loss": 0.086,
"step": 151
},
{
"epoch": 2.5033453422542458,
"grad_norm": 0.8311520218849182,
"learning_rate": 1.4383310046973365e-06,
"loss": 0.0849,
"step": 152
},
{
"epoch": 2.5198147195059186,
"grad_norm": 0.8406962752342224,
"learning_rate": 1.339745962155613e-06,
"loss": 0.083,
"step": 153
},
{
"epoch": 2.5362840967575915,
"grad_norm": 0.8209373354911804,
"learning_rate": 1.2444176869790925e-06,
"loss": 0.0813,
"step": 154
},
{
"epoch": 2.552753474009264,
"grad_norm": 0.8779584169387817,
"learning_rate": 1.152382028233422e-06,
"loss": 0.0868,
"step": 155
},
{
"epoch": 2.5692228512609367,
"grad_norm": 0.8810158967971802,
"learning_rate": 1.0636735967658785e-06,
"loss": 0.0865,
"step": 156
},
{
"epoch": 2.5856922285126096,
"grad_norm": 0.865736722946167,
"learning_rate": 9.783257521896228e-07,
"loss": 0.0846,
"step": 157
},
{
"epoch": 2.602161605764282,
"grad_norm": 0.845588207244873,
"learning_rate": 8.963705903385344e-07,
"loss": 0.0847,
"step": 158
},
{
"epoch": 2.618630983015955,
"grad_norm": 0.815373420715332,
"learning_rate": 8.178389311972612e-07,
"loss": 0.0845,
"step": 159
},
{
"epoch": 2.6351003602676273,
"grad_norm": 0.7676054835319519,
"learning_rate": 7.427603073110967e-07,
"loss": 0.0833,
"step": 160
},
{
"epoch": 2.6515697375193,
"grad_norm": 0.7454907894134521,
"learning_rate": 6.711629526799946e-07,
"loss": 0.0819,
"step": 161
},
{
"epoch": 2.6680391147709726,
"grad_norm": 0.7519494891166687,
"learning_rate": 6.030737921409169e-07,
"loss": 0.0847,
"step": 162
},
{
"epoch": 2.6845084920226454,
"grad_norm": 0.7272472977638245,
"learning_rate": 5.385184312424973e-07,
"loss": 0.0823,
"step": 163
},
{
"epoch": 2.7009778692743183,
"grad_norm": 0.7485241293907166,
"learning_rate": 4.775211466158469e-07,
"loss": 0.0865,
"step": 164
},
{
"epoch": 2.7174472465259907,
"grad_norm": 0.7801215052604675,
"learning_rate": 4.2010487684511105e-07,
"loss": 0.0858,
"step": 165
},
{
"epoch": 2.7339166237776635,
"grad_norm": 0.7571792602539062,
"learning_rate": 3.662912138411967e-07,
"loss": 0.083,
"step": 166
},
{
"epoch": 2.750386001029336,
"grad_norm": 0.7191453576087952,
"learning_rate": 3.161003947219421e-07,
"loss": 0.0833,
"step": 167
},
{
"epoch": 2.766855378281009,
"grad_norm": 0.7716163992881775,
"learning_rate": 2.6955129420176193e-07,
"loss": 0.0841,
"step": 168
},
{
"epoch": 2.783324755532681,
"grad_norm": 0.7539470195770264,
"learning_rate": 2.2666141749364434e-07,
"loss": 0.0836,
"step": 169
},
{
"epoch": 2.799794132784354,
"grad_norm": 0.7242315411567688,
"learning_rate": 1.874468937261531e-07,
"loss": 0.0821,
"step": 170
},
{
"epoch": 2.816263510036027,
"grad_norm": 0.7200111746788025,
"learning_rate": 1.519224698779198e-07,
"loss": 0.0837,
"step": 171
},
{
"epoch": 2.8327328872876993,
"grad_norm": 0.7370088696479797,
"learning_rate": 1.201015052319099e-07,
"loss": 0.084,
"step": 172
},
{
"epoch": 2.849202264539372,
"grad_norm": 0.7719192504882812,
"learning_rate": 9.199596635154684e-08,
"loss": 0.0837,
"step": 173
},
{
"epoch": 2.8656716417910446,
"grad_norm": 0.7894994020462036,
"learning_rate": 6.761642258056977e-08,
"loss": 0.0835,
"step": 174
},
{
"epoch": 2.8821410190427175,
"grad_norm": 0.7965743541717529,
"learning_rate": 4.6972042068341714e-08,
"loss": 0.0823,
"step": 175
},
{
"epoch": 2.89861039629439,
"grad_norm": 0.7946169376373291,
"learning_rate": 3.0070588322079765e-08,
"loss": 0.0806,
"step": 176
},
{
"epoch": 2.9150797735460627,
"grad_norm": 0.8272018432617188,
"learning_rate": 1.6918417287318245e-08,
"loss": 0.0828,
"step": 177
},
{
"epoch": 2.9315491507977356,
"grad_norm": 0.8682279586791992,
"learning_rate": 7.520474957699586e-09,
"loss": 0.0849,
"step": 178
},
{
"epoch": 2.948018528049408,
"grad_norm": 0.834924578666687,
"learning_rate": 1.8802955149865854e-09,
"loss": 0.0837,
"step": 179
},
{
"epoch": 2.964487905301081,
"grad_norm": 0.8157399892807007,
"learning_rate": 0.0,
"loss": 0.0827,
"step": 180
},
{
"epoch": 2.964487905301081,
"step": 180,
"total_flos": 8.05255292248916e+18,
"train_loss": 0.14365306976768705,
"train_runtime": 16152.8942,
"train_samples_per_second": 5.774,
"train_steps_per_second": 0.011
}
],
"logging_steps": 1.0,
"max_steps": 180,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.05255292248916e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}