{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 176,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011363636363636364,
"grad_norm": 0.05071757916612924,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.9533,
"step": 1
},
{
"epoch": 0.022727272727272728,
"grad_norm": 0.04259673661044319,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.8187,
"step": 2
},
{
"epoch": 0.03409090909090909,
"grad_norm": 0.038167269228831166,
"learning_rate": 4.9999999999999996e-06,
"loss": 0.7078,
"step": 3
},
{
"epoch": 0.045454545454545456,
"grad_norm": 0.05050074141516605,
"learning_rate": 6.666666666666667e-06,
"loss": 0.9871,
"step": 4
},
{
"epoch": 0.056818181818181816,
"grad_norm": 0.0519749083650117,
"learning_rate": 8.333333333333334e-06,
"loss": 0.9578,
"step": 5
},
{
"epoch": 0.06818181818181818,
"grad_norm": 0.04574809279613752,
"learning_rate": 9.999999999999999e-06,
"loss": 0.9209,
"step": 6
},
{
"epoch": 0.07954545454545454,
"grad_norm": 0.044715017183707254,
"learning_rate": 1.1666666666666668e-05,
"loss": 0.7979,
"step": 7
},
{
"epoch": 0.09090909090909091,
"grad_norm": 0.05226409537083054,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.877,
"step": 8
},
{
"epoch": 0.10227272727272728,
"grad_norm": 0.04668468864592418,
"learning_rate": 1.5e-05,
"loss": 0.7896,
"step": 9
},
{
"epoch": 0.11363636363636363,
"grad_norm": 0.047115395620200826,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.7995,
"step": 10
},
{
"epoch": 0.125,
"grad_norm": 0.05014552073730939,
"learning_rate": 1.8333333333333336e-05,
"loss": 0.9263,
"step": 11
},
{
"epoch": 0.13636363636363635,
"grad_norm": 0.05372632215952556,
"learning_rate": 1.9999999999999998e-05,
"loss": 0.9383,
"step": 12
},
{
"epoch": 0.14772727272727273,
"grad_norm": 0.05707657034738693,
"learning_rate": 2.1666666666666667e-05,
"loss": 0.8975,
"step": 13
},
{
"epoch": 0.1590909090909091,
"grad_norm": 0.051859271383260366,
"learning_rate": 2.3333333333333336e-05,
"loss": 0.8436,
"step": 14
},
{
"epoch": 0.17045454545454544,
"grad_norm": 0.043928447365915155,
"learning_rate": 2.5e-05,
"loss": 0.8173,
"step": 15
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.041643505841243426,
"learning_rate": 2.6666666666666667e-05,
"loss": 0.8475,
"step": 16
},
{
"epoch": 0.19318181818181818,
"grad_norm": 0.035898193549085154,
"learning_rate": 2.8333333333333332e-05,
"loss": 0.7011,
"step": 17
},
{
"epoch": 0.20454545454545456,
"grad_norm": 0.03546003498547787,
"learning_rate": 3e-05,
"loss": 0.7448,
"step": 18
},
{
"epoch": 0.2159090909090909,
"grad_norm": 0.03349420007958397,
"learning_rate": 2.9997034946550984e-05,
"loss": 0.787,
"step": 19
},
{
"epoch": 0.22727272727272727,
"grad_norm": 0.034648139602108434,
"learning_rate": 2.9988140958409528e-05,
"loss": 0.8557,
"step": 20
},
{
"epoch": 0.23863636363636365,
"grad_norm": 0.03180472516603228,
"learning_rate": 2.9973321551728995e-05,
"loss": 0.8827,
"step": 21
},
{
"epoch": 0.25,
"grad_norm": 0.03533912995703418,
"learning_rate": 2.995258258522044e-05,
"loss": 0.8307,
"step": 22
},
{
"epoch": 0.26136363636363635,
"grad_norm": 0.02773960323166012,
"learning_rate": 2.992593225783641e-05,
"loss": 0.7875,
"step": 23
},
{
"epoch": 0.2727272727272727,
"grad_norm": 0.03211541529448073,
"learning_rate": 2.98933811055296e-05,
"loss": 0.7867,
"step": 24
},
{
"epoch": 0.2840909090909091,
"grad_norm": 0.03484278990529821,
"learning_rate": 2.985494199708753e-05,
"loss": 0.7932,
"step": 25
},
{
"epoch": 0.29545454545454547,
"grad_norm": 0.03540049270625024,
"learning_rate": 2.9810630129045003e-05,
"loss": 0.9371,
"step": 26
},
{
"epoch": 0.3068181818181818,
"grad_norm": 0.03612849262444276,
"learning_rate": 2.976046301967631e-05,
"loss": 0.7899,
"step": 27
},
{
"epoch": 0.3181818181818182,
"grad_norm": 0.03728774007148355,
"learning_rate": 2.9704460502069544e-05,
"loss": 0.8348,
"step": 28
},
{
"epoch": 0.32954545454545453,
"grad_norm": 0.043320831531330754,
"learning_rate": 2.9642644716285765e-05,
"loss": 0.7098,
"step": 29
},
{
"epoch": 0.3409090909090909,
"grad_norm": 0.046393197374255936,
"learning_rate": 2.957504010060615e-05,
"loss": 0.7655,
"step": 30
},
{
"epoch": 0.3522727272727273,
"grad_norm": 0.03304316691451998,
"learning_rate": 2.950167338187056e-05,
"loss": 0.7992,
"step": 31
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.038820818042923616,
"learning_rate": 2.9422573564911305e-05,
"loss": 0.7544,
"step": 32
},
{
"epoch": 0.375,
"grad_norm": 0.0404422735598537,
"learning_rate": 2.933777192108641e-05,
"loss": 0.7165,
"step": 33
},
{
"epoch": 0.38636363636363635,
"grad_norm": 0.03617558322088643,
"learning_rate": 2.924730197591674e-05,
"loss": 0.7778,
"step": 34
},
{
"epoch": 0.3977272727272727,
"grad_norm": 0.03530529645983289,
"learning_rate": 2.9151199495832017e-05,
"loss": 0.7011,
"step": 35
},
{
"epoch": 0.4090909090909091,
"grad_norm": 0.0314407433278301,
"learning_rate": 2.904950247403092e-05,
"loss": 0.7714,
"step": 36
},
{
"epoch": 0.42045454545454547,
"grad_norm": 0.02890718034687669,
"learning_rate": 2.8942251115460808e-05,
"loss": 0.8296,
"step": 37
},
{
"epoch": 0.4318181818181818,
"grad_norm": 0.026351375691308618,
"learning_rate": 2.88294878209231e-05,
"loss": 0.7224,
"step": 38
},
{
"epoch": 0.4431818181818182,
"grad_norm": 0.026057911050370135,
"learning_rate": 2.871125717031052e-05,
"loss": 0.7434,
"step": 39
},
{
"epoch": 0.45454545454545453,
"grad_norm": 0.026073913399806834,
"learning_rate": 2.858760590498285e-05,
"loss": 0.8207,
"step": 40
},
{
"epoch": 0.4659090909090909,
"grad_norm": 0.020463452540514345,
"learning_rate": 2.8458582909288185e-05,
"loss": 0.7069,
"step": 41
},
{
"epoch": 0.4772727272727273,
"grad_norm": 0.02324047128233221,
"learning_rate": 2.832423919123698e-05,
"loss": 0.7914,
"step": 42
},
{
"epoch": 0.48863636363636365,
"grad_norm": 0.019473403032255244,
"learning_rate": 2.8184627862336507e-05,
"loss": 0.6676,
"step": 43
},
{
"epoch": 0.5,
"grad_norm": 0.018395880480375864,
"learning_rate": 2.8039804116593743e-05,
"loss": 0.7095,
"step": 44
},
{
"epoch": 0.5113636363636364,
"grad_norm": 0.02161835607931781,
"learning_rate": 2.7889825208694915e-05,
"loss": 0.8338,
"step": 45
},
{
"epoch": 0.5227272727272727,
"grad_norm": 0.016724773851591157,
"learning_rate": 2.7734750431370462e-05,
"loss": 0.7122,
"step": 46
},
{
"epoch": 0.5340909090909091,
"grad_norm": 0.017846968866685832,
"learning_rate": 2.7574641091954148e-05,
"loss": 0.6685,
"step": 47
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.016985703113680868,
"learning_rate": 2.7409560488145863e-05,
"loss": 0.7548,
"step": 48
},
{
"epoch": 0.5568181818181818,
"grad_norm": 0.01759149371654561,
"learning_rate": 2.7239573882987418e-05,
"loss": 0.7099,
"step": 49
},
{
"epoch": 0.5681818181818182,
"grad_norm": 0.015018271516298119,
"learning_rate": 2.7064748479061476e-05,
"loss": 0.7512,
"step": 50
},
{
"epoch": 0.5795454545454546,
"grad_norm": 0.017114886858124977,
"learning_rate": 2.6885153391923615e-05,
"loss": 0.6785,
"step": 51
},
{
"epoch": 0.5909090909090909,
"grad_norm": 0.019418726094752342,
"learning_rate": 2.6700859622778184e-05,
"loss": 0.6766,
"step": 52
},
{
"epoch": 0.6022727272727273,
"grad_norm": 0.016895532120200757,
"learning_rate": 2.651194003040862e-05,
"loss": 0.7728,
"step": 53
},
{
"epoch": 0.6136363636363636,
"grad_norm": 0.018451999655787327,
"learning_rate": 2.6318469302373453e-05,
"loss": 0.6993,
"step": 54
},
{
"epoch": 0.625,
"grad_norm": 0.018521256672385557,
"learning_rate": 2.6120523925479275e-05,
"loss": 0.6963,
"step": 55
},
{
"epoch": 0.6363636363636364,
"grad_norm": 0.01628280369064653,
"learning_rate": 2.5918182155542415e-05,
"loss": 0.6873,
"step": 56
},
{
"epoch": 0.6477272727272727,
"grad_norm": 0.016688951971934624,
"learning_rate": 2.571152398645125e-05,
"loss": 0.754,
"step": 57
},
{
"epoch": 0.6590909090909091,
"grad_norm": 0.015191399660727446,
"learning_rate": 2.550063111854138e-05,
"loss": 0.5572,
"step": 58
},
{
"epoch": 0.6704545454545454,
"grad_norm": 0.014810629906998002,
"learning_rate": 2.52855869262962e-05,
"loss": 0.7199,
"step": 59
},
{
"epoch": 0.6818181818181818,
"grad_norm": 0.01711427193939343,
"learning_rate": 2.5066476425385552e-05,
"loss": 0.7315,
"step": 60
},
{
"epoch": 0.6931818181818182,
"grad_norm": 0.015569879054517195,
"learning_rate": 2.4843386239055634e-05,
"loss": 0.6592,
"step": 61
},
{
"epoch": 0.7045454545454546,
"grad_norm": 0.013751124893475849,
"learning_rate": 2.4616404563883302e-05,
"loss": 0.7748,
"step": 62
},
{
"epoch": 0.7159090909090909,
"grad_norm": 0.015005014515577788,
"learning_rate": 2.43856211349084e-05,
"loss": 0.7134,
"step": 63
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.013911288664586612,
"learning_rate": 2.4151127190157864e-05,
"loss": 0.7309,
"step": 64
},
{
"epoch": 0.7386363636363636,
"grad_norm": 0.01508230536135687,
"learning_rate": 2.3913015434575658e-05,
"loss": 0.7162,
"step": 65
},
{
"epoch": 0.75,
"grad_norm": 0.01371246304799072,
"learning_rate": 2.3671380003372726e-05,
"loss": 0.7309,
"step": 66
},
{
"epoch": 0.7613636363636364,
"grad_norm": 0.01587857420654162,
"learning_rate": 2.3426316424811563e-05,
"loss": 0.6933,
"step": 67
},
{
"epoch": 0.7727272727272727,
"grad_norm": 0.02112505189200146,
"learning_rate": 2.3177921582440015e-05,
"loss": 0.8161,
"step": 68
},
{
"epoch": 0.7840909090909091,
"grad_norm": 0.015148764288189093,
"learning_rate": 2.2926293676789295e-05,
"loss": 0.6977,
"step": 69
},
{
"epoch": 0.7954545454545454,
"grad_norm": 0.0151890594638358,
"learning_rate": 2.2671532186551335e-05,
"loss": 0.6527,
"step": 70
},
{
"epoch": 0.8068181818181818,
"grad_norm": 0.014515147138003984,
"learning_rate": 2.2413737829250842e-05,
"loss": 0.7171,
"step": 71
},
{
"epoch": 0.8181818181818182,
"grad_norm": 0.016798521672122816,
"learning_rate": 2.2153012521427593e-05,
"loss": 0.725,
"step": 72
},
{
"epoch": 0.8295454545454546,
"grad_norm": 0.016175606452070515,
"learning_rate": 2.1889459338344667e-05,
"loss": 0.7314,
"step": 73
},
{
"epoch": 0.8409090909090909,
"grad_norm": 0.01731472464207072,
"learning_rate": 2.162318247323868e-05,
"loss": 0.6708,
"step": 74
},
{
"epoch": 0.8522727272727273,
"grad_norm": 0.015934196182729173,
"learning_rate": 2.1354287196127925e-05,
"loss": 0.6836,
"step": 75
},
{
"epoch": 0.8636363636363636,
"grad_norm": 0.013335486137157236,
"learning_rate": 2.108287981219491e-05,
"loss": 0.7366,
"step": 76
},
{
"epoch": 0.875,
"grad_norm": 0.016178491108168734,
"learning_rate": 2.0809067619759618e-05,
"loss": 0.7951,
"step": 77
},
{
"epoch": 0.8863636363636364,
"grad_norm": 0.0176795976865852,
"learning_rate": 2.0532958867860115e-05,
"loss": 0.7131,
"step": 78
},
{
"epoch": 0.8977272727272727,
"grad_norm": 0.015333567208780848,
"learning_rate": 2.0254662713457366e-05,
"loss": 0.7524,
"step": 79
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.013938237896385245,
"learning_rate": 1.997428917828102e-05,
"loss": 0.6899,
"step": 80
},
{
"epoch": 0.9204545454545454,
"grad_norm": 0.014571786919497603,
"learning_rate": 1.969194910533341e-05,
"loss": 0.6546,
"step": 81
},
{
"epoch": 0.9318181818181818,
"grad_norm": 0.015175277215365669,
"learning_rate": 1.9407754115068814e-05,
"loss": 0.6288,
"step": 82
},
{
"epoch": 0.9431818181818182,
"grad_norm": 0.016148886357585004,
"learning_rate": 1.9121816561265377e-05,
"loss": 0.6456,
"step": 83
},
{
"epoch": 0.9545454545454546,
"grad_norm": 0.017478015375242625,
"learning_rate": 1.883424948660712e-05,
"loss": 0.6011,
"step": 84
},
{
"epoch": 0.9659090909090909,
"grad_norm": 0.015501462455188693,
"learning_rate": 1.8545166577993576e-05,
"loss": 0.6796,
"step": 85
},
{
"epoch": 0.9772727272727273,
"grad_norm": 0.016853145780441673,
"learning_rate": 1.825468212159477e-05,
"loss": 0.6703,
"step": 86
},
{
"epoch": 0.9886363636363636,
"grad_norm": 0.013534100626216232,
"learning_rate": 1.7962910957669292e-05,
"loss": 0.6525,
"step": 87
},
{
"epoch": 1.0,
"grad_norm": 0.01437747648796264,
"learning_rate": 1.766996843516326e-05,
"loss": 0.703,
"step": 88
},
{
"epoch": 1.0113636363636365,
"grad_norm": 0.014962989333621064,
"learning_rate": 1.7375970366108225e-05,
"loss": 0.6708,
"step": 89
},
{
"epoch": 1.0227272727272727,
"grad_norm": 0.013628354300466874,
"learning_rate": 1.7081032979836027e-05,
"loss": 0.6684,
"step": 90
},
{
"epoch": 1.0340909090909092,
"grad_norm": 0.014757516119950007,
"learning_rate": 1.6785272877028573e-05,
"loss": 0.8039,
"step": 91
},
{
"epoch": 1.0454545454545454,
"grad_norm": 0.016583346086451647,
"learning_rate": 1.6488806983620927e-05,
"loss": 0.6698,
"step": 92
},
{
"epoch": 1.0568181818181819,
"grad_norm": 0.01557946748011454,
"learning_rate": 1.619175250457572e-05,
"loss": 0.7158,
"step": 93
},
{
"epoch": 1.0681818181818181,
"grad_norm": 0.015238083013160661,
"learning_rate": 1.5894226877547298e-05,
"loss": 0.6552,
"step": 94
},
{
"epoch": 1.0795454545454546,
"grad_norm": 0.01786276315466234,
"learning_rate": 1.5596347726453887e-05,
"loss": 0.751,
"step": 95
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.016043977197405003,
"learning_rate": 1.5298232814976053e-05,
"loss": 0.7216,
"step": 96
},
{
"epoch": 1.1022727272727273,
"grad_norm": 0.014581212655384385,
"learning_rate": 1.5e-05,
"loss": 0.7099,
"step": 97
},
{
"epoch": 1.1136363636363635,
"grad_norm": 0.014614656214189087,
"learning_rate": 1.4701767185023948e-05,
"loss": 0.6997,
"step": 98
},
{
"epoch": 1.125,
"grad_norm": 0.014304947288667047,
"learning_rate": 1.4403652273546118e-05,
"loss": 0.6585,
"step": 99
},
{
"epoch": 1.1363636363636362,
"grad_norm": 0.013971774404435894,
"learning_rate": 1.4105773122452703e-05,
"loss": 0.6652,
"step": 100
},
{
"epoch": 1.1477272727272727,
"grad_norm": 0.014471095814527176,
"learning_rate": 1.3808247495424285e-05,
"loss": 0.6853,
"step": 101
},
{
"epoch": 1.1590909090909092,
"grad_norm": 0.015391316610067228,
"learning_rate": 1.3511193016379079e-05,
"loss": 0.6815,
"step": 102
},
{
"epoch": 1.1704545454545454,
"grad_norm": 0.017920345666710375,
"learning_rate": 1.3214727122971431e-05,
"loss": 0.6455,
"step": 103
},
{
"epoch": 1.1818181818181819,
"grad_norm": 0.016706005732186276,
"learning_rate": 1.2918967020163978e-05,
"loss": 0.7431,
"step": 104
},
{
"epoch": 1.1931818181818181,
"grad_norm": 0.014877322443809265,
"learning_rate": 1.2624029633891776e-05,
"loss": 0.7597,
"step": 105
},
{
"epoch": 1.2045454545454546,
"grad_norm": 0.015368071315037188,
"learning_rate": 1.2330031564836749e-05,
"loss": 0.7589,
"step": 106
},
{
"epoch": 1.2159090909090908,
"grad_norm": 0.016382740958481367,
"learning_rate": 1.203708904233071e-05,
"loss": 0.5941,
"step": 107
},
{
"epoch": 1.2272727272727273,
"grad_norm": 0.015595461127570374,
"learning_rate": 1.1745317878405229e-05,
"loss": 0.8026,
"step": 108
},
{
"epoch": 1.2386363636363638,
"grad_norm": 0.01824573515682411,
"learning_rate": 1.1454833422006428e-05,
"loss": 0.7106,
"step": 109
},
{
"epoch": 1.25,
"grad_norm": 0.014246226061057972,
"learning_rate": 1.116575051339288e-05,
"loss": 0.6226,
"step": 110
},
{
"epoch": 1.2613636363636362,
"grad_norm": 0.015194199552674691,
"learning_rate": 1.087818343873462e-05,
"loss": 0.6878,
"step": 111
},
{
"epoch": 1.2727272727272727,
"grad_norm": 0.018903921584870875,
"learning_rate": 1.0592245884931188e-05,
"loss": 0.6735,
"step": 112
},
{
"epoch": 1.2840909090909092,
"grad_norm": 0.014814829445622978,
"learning_rate": 1.0308050894666596e-05,
"loss": 0.7126,
"step": 113
},
{
"epoch": 1.2954545454545454,
"grad_norm": 0.0175575648464779,
"learning_rate": 1.0025710821718983e-05,
"loss": 0.6755,
"step": 114
},
{
"epoch": 1.3068181818181819,
"grad_norm": 0.0193390892171132,
"learning_rate": 9.745337286542634e-06,
"loss": 0.6402,
"step": 115
},
{
"epoch": 1.3181818181818181,
"grad_norm": 0.017074854773535725,
"learning_rate": 9.467041132139884e-06,
"loss": 0.7193,
"step": 116
},
{
"epoch": 1.3295454545454546,
"grad_norm": 0.016645927361554368,
"learning_rate": 9.190932380240386e-06,
"loss": 0.6427,
"step": 117
},
{
"epoch": 1.3409090909090908,
"grad_norm": 0.01829438342338641,
"learning_rate": 8.917120187805091e-06,
"loss": 0.7091,
"step": 118
},
{
"epoch": 1.3522727272727273,
"grad_norm": 0.018172057943652505,
"learning_rate": 8.645712803872084e-06,
"loss": 0.7768,
"step": 119
},
{
"epoch": 1.3636363636363638,
"grad_norm": 0.019067265925995506,
"learning_rate": 8.376817526761327e-06,
"loss": 0.8617,
"step": 120
},
{
"epoch": 1.375,
"grad_norm": 0.016103807777135562,
"learning_rate": 8.110540661655329e-06,
"loss": 0.6217,
"step": 121
},
{
"epoch": 1.3863636363636362,
"grad_norm": 0.01791717347539582,
"learning_rate": 7.846987478572411e-06,
"loss": 0.7356,
"step": 122
},
{
"epoch": 1.3977272727272727,
"grad_norm": 0.018325232410528938,
"learning_rate": 7.586262170749158e-06,
"loss": 0.6521,
"step": 123
},
{
"epoch": 1.4090909090909092,
"grad_norm": 0.018905797778818914,
"learning_rate": 7.3284678134486685e-06,
"loss": 0.7509,
"step": 124
},
{
"epoch": 1.4204545454545454,
"grad_norm": 0.018602260408145506,
"learning_rate": 7.073706323210714e-06,
"loss": 0.7016,
"step": 125
},
{
"epoch": 1.4318181818181819,
"grad_norm": 0.016514651244433562,
"learning_rate": 6.822078417559991e-06,
"loss": 0.8002,
"step": 126
},
{
"epoch": 1.4431818181818181,
"grad_norm": 0.018833593987548795,
"learning_rate": 6.573683575188436e-06,
"loss": 0.803,
"step": 127
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.017644785970119186,
"learning_rate": 6.328619996627272e-06,
"loss": 0.677,
"step": 128
},
{
"epoch": 1.4659090909090908,
"grad_norm": 0.019347828980988285,
"learning_rate": 6.086984565424345e-06,
"loss": 0.7148,
"step": 129
},
{
"epoch": 1.4772727272727273,
"grad_norm": 0.019300543348893917,
"learning_rate": 5.8488728098421345e-06,
"loss": 0.7655,
"step": 130
},
{
"epoch": 1.4886363636363638,
"grad_norm": 0.016574344974984206,
"learning_rate": 5.614378865091602e-06,
"loss": 0.6653,
"step": 131
},
{
"epoch": 1.5,
"grad_norm": 0.016118492967204696,
"learning_rate": 5.383595436116703e-06,
"loss": 0.6845,
"step": 132
},
{
"epoch": 1.5113636363636362,
"grad_norm": 0.016913099517869973,
"learning_rate": 5.1566137609443696e-06,
"loss": 0.5999,
"step": 133
},
{
"epoch": 1.5227272727272727,
"grad_norm": 0.017324770814363996,
"learning_rate": 4.933523574614447e-06,
"loss": 0.6307,
"step": 134
},
{
"epoch": 1.5340909090909092,
"grad_norm": 0.018088958011481436,
"learning_rate": 4.714413073703804e-06,
"loss": 0.8114,
"step": 135
},
{
"epoch": 1.5454545454545454,
"grad_norm": 0.018107305774406484,
"learning_rate": 4.4993688814586215e-06,
"loss": 0.7423,
"step": 136
},
{
"epoch": 1.5568181818181817,
"grad_norm": 0.017510667127987316,
"learning_rate": 4.2884760135487535e-06,
"loss": 0.759,
"step": 137
},
{
"epoch": 1.5681818181818183,
"grad_norm": 0.016966967177959615,
"learning_rate": 4.081817844457589e-06,
"loss": 0.7095,
"step": 138
},
{
"epoch": 1.5795454545454546,
"grad_norm": 0.01600098768790145,
"learning_rate": 3.879476074520731e-06,
"loss": 0.6216,
"step": 139
},
{
"epoch": 1.5909090909090908,
"grad_norm": 0.0172344148854807,
"learning_rate": 3.6815306976265466e-06,
"loss": 0.6527,
"step": 140
},
{
"epoch": 1.6022727272727273,
"grad_norm": 0.017950252109441598,
"learning_rate": 3.4880599695913786e-06,
"loss": 0.6526,
"step": 141
},
{
"epoch": 1.6136363636363638,
"grad_norm": 0.02014187993227391,
"learning_rate": 3.29914037722182e-06,
"loss": 0.6438,
"step": 142
},
{
"epoch": 1.625,
"grad_norm": 0.01738551549819398,
"learning_rate": 3.114846608076387e-06,
"loss": 0.6948,
"step": 143
},
{
"epoch": 1.6363636363636362,
"grad_norm": 0.020356218803820863,
"learning_rate": 2.935251520938528e-06,
"loss": 0.7257,
"step": 144
},
{
"epoch": 1.6477272727272727,
"grad_norm": 0.017056708535399843,
"learning_rate": 2.7604261170125895e-06,
"loss": 0.7378,
"step": 145
},
{
"epoch": 1.6590909090909092,
"grad_norm": 0.018468485707882423,
"learning_rate": 2.590439511854144e-06,
"loss": 0.6424,
"step": 146
},
{
"epoch": 1.6704545454545454,
"grad_norm": 0.019802120923836132,
"learning_rate": 2.425358908045851e-06,
"loss": 0.6856,
"step": 147
},
{
"epoch": 1.6818181818181817,
"grad_norm": 0.01785608788569529,
"learning_rate": 2.265249568629539e-06,
"loss": 0.7243,
"step": 148
},
{
"epoch": 1.6931818181818183,
"grad_norm": 0.017835761915165615,
"learning_rate": 2.1101747913050855e-06,
"loss": 0.6239,
"step": 149
},
{
"epoch": 1.7045454545454546,
"grad_norm": 0.02106800708452751,
"learning_rate": 1.9601958834062627e-06,
"loss": 0.7743,
"step": 150
},
{
"epoch": 1.7159090909090908,
"grad_norm": 0.02012027454636489,
"learning_rate": 1.8153721376634947e-06,
"loss": 0.7849,
"step": 151
},
{
"epoch": 1.7272727272727273,
"grad_norm": 0.017124639920244208,
"learning_rate": 1.6757608087630249e-06,
"loss": 0.7285,
"step": 152
},
{
"epoch": 1.7386363636363638,
"grad_norm": 0.016243073549560335,
"learning_rate": 1.5414170907118153e-06,
"loss": 0.7609,
"step": 153
},
{
"epoch": 1.75,
"grad_norm": 0.01700333660952848,
"learning_rate": 1.412394095017151e-06,
"loss": 0.6887,
"step": 154
},
{
"epoch": 1.7613636363636362,
"grad_norm": 0.01661324924856346,
"learning_rate": 1.2887428296894809e-06,
"loss": 0.7988,
"step": 155
},
{
"epoch": 1.7727272727272727,
"grad_norm": 0.016555144729019467,
"learning_rate": 1.1705121790769014e-06,
"loss": 0.7122,
"step": 156
},
{
"epoch": 1.7840909090909092,
"grad_norm": 0.01645576417466405,
"learning_rate": 1.0577488845391946e-06,
"loss": 0.7107,
"step": 157
},
{
"epoch": 1.7954545454545454,
"grad_norm": 0.01714114500980707,
"learning_rate": 9.504975259690835e-07,
"loss": 0.7371,
"step": 158
},
{
"epoch": 1.8068181818181817,
"grad_norm": 0.01725824524428257,
"learning_rate": 8.488005041679842e-07,
"loss": 0.6346,
"step": 159
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.018332594317410387,
"learning_rate": 7.526980240832598e-07,
"loss": 0.7321,
"step": 160
},
{
"epoch": 1.8295454545454546,
"grad_norm": 0.017728012181319464,
"learning_rate": 6.622280789135876e-07,
"loss": 0.6729,
"step": 161
},
{
"epoch": 1.8409090909090908,
"grad_norm": 0.018834032403160974,
"learning_rate": 5.774264350886937e-07,
"loss": 0.6841,
"step": 162
},
{
"epoch": 1.8522727272727273,
"grad_norm": 0.016746960537530962,
"learning_rate": 4.983266181294427e-07,
"loss": 0.6954,
"step": 163
},
{
"epoch": 1.8636363636363638,
"grad_norm": 0.01747119672545748,
"learning_rate": 4.2495989939384916e-07,
"loss": 0.7679,
"step": 164
},
{
"epoch": 1.875,
"grad_norm": 0.016865810971941564,
"learning_rate": 3.573552837142391e-07,
"loss": 0.6358,
"step": 165
},
{
"epoch": 1.8863636363636362,
"grad_norm": 0.022834024536834356,
"learning_rate": 2.9553949793045874e-07,
"loss": 0.7111,
"step": 166
},
{
"epoch": 1.8977272727272727,
"grad_norm": 0.016510566074369487,
"learning_rate": 2.395369803236902e-07,
"loss": 0.603,
"step": 167
},
{
"epoch": 1.9090909090909092,
"grad_norm": 0.01701585871989393,
"learning_rate": 1.893698709549996e-07,
"loss": 0.6871,
"step": 168
},
{
"epoch": 1.9204545454545454,
"grad_norm": 0.019587545442803056,
"learning_rate": 1.4505800291247207e-07,
"loss": 0.795,
"step": 169
},
{
"epoch": 1.9318181818181817,
"grad_norm": 0.017848972998695284,
"learning_rate": 1.0661889447039886e-07,
"loss": 0.7491,
"step": 170
},
{
"epoch": 1.9431818181818183,
"grad_norm": 0.023343303419197096,
"learning_rate": 7.40677421635888e-08,
"loss": 0.7785,
"step": 171
},
{
"epoch": 1.9545454545454546,
"grad_norm": 0.017228097663166443,
"learning_rate": 4.741741477956252e-08,
"loss": 0.7956,
"step": 172
},
{
"epoch": 1.9659090909090908,
"grad_norm": 0.01701019181440642,
"learning_rate": 2.6678448271005296e-08,
"loss": 0.7096,
"step": 173
},
{
"epoch": 1.9772727272727273,
"grad_norm": 0.018268742287283965,
"learning_rate": 1.1859041590472352e-08,
"loss": 0.7675,
"step": 174
},
{
"epoch": 1.9886363636363638,
"grad_norm": 0.01786805660364942,
"learning_rate": 2.9650534490166527e-09,
"loss": 0.6691,
"step": 175
},
{
"epoch": 2.0,
"grad_norm": 0.01895454835226293,
"learning_rate": 0.0,
"loss": 0.7506,
"step": 176
},
{
"epoch": 2.0,
"step": 176,
"total_flos": 3054747482324992.0,
"train_loss": 0.7323629280382936,
"train_runtime": 3676.475,
"train_samples_per_second": 1.526,
"train_steps_per_second": 0.048
}
],
"logging_steps": 1,
"max_steps": 176,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3054747482324992.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}