{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 168,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005952380952380952,
"grad_norm": 9.777442932128906,
"learning_rate": 0.0,
"loss": 1.0466,
"step": 1
},
{
"epoch": 0.011904761904761904,
"grad_norm": 9.990398406982422,
"learning_rate": 6.060606060606061e-06,
"loss": 1.0202,
"step": 2
},
{
"epoch": 0.017857142857142856,
"grad_norm": 4.0702080726623535,
"learning_rate": 1.2121212121212122e-05,
"loss": 1.0372,
"step": 3
},
{
"epoch": 0.023809523809523808,
"grad_norm": 3.9334001541137695,
"learning_rate": 1.8181818181818182e-05,
"loss": 0.9981,
"step": 4
},
{
"epoch": 0.02976190476190476,
"grad_norm": 1.597848653793335,
"learning_rate": 2.4242424242424244e-05,
"loss": 0.8703,
"step": 5
},
{
"epoch": 0.03571428571428571,
"grad_norm": 1.0396702289581299,
"learning_rate": 3.0303030303030306e-05,
"loss": 0.8549,
"step": 6
},
{
"epoch": 0.041666666666666664,
"grad_norm": 1.0887113809585571,
"learning_rate": 3.6363636363636364e-05,
"loss": 0.8365,
"step": 7
},
{
"epoch": 0.047619047619047616,
"grad_norm": 0.7733088731765747,
"learning_rate": 4.242424242424243e-05,
"loss": 0.8323,
"step": 8
},
{
"epoch": 0.05357142857142857,
"grad_norm": 1.1227962970733643,
"learning_rate": 4.848484848484849e-05,
"loss": 0.789,
"step": 9
},
{
"epoch": 0.05952380952380952,
"grad_norm": 0.44949159026145935,
"learning_rate": 5.4545454545454546e-05,
"loss": 0.7831,
"step": 10
},
{
"epoch": 0.06547619047619048,
"grad_norm": 0.5854464173316956,
"learning_rate": 6.060606060606061e-05,
"loss": 0.7654,
"step": 11
},
{
"epoch": 0.07142857142857142,
"grad_norm": 0.8678174018859863,
"learning_rate": 6.666666666666667e-05,
"loss": 0.7826,
"step": 12
},
{
"epoch": 0.07738095238095238,
"grad_norm": 0.35696980357170105,
"learning_rate": 7.272727272727273e-05,
"loss": 0.7699,
"step": 13
},
{
"epoch": 0.08333333333333333,
"grad_norm": 0.25360599160194397,
"learning_rate": 7.878787878787879e-05,
"loss": 0.7555,
"step": 14
},
{
"epoch": 0.08928571428571429,
"grad_norm": 0.2302754670381546,
"learning_rate": 8.484848484848486e-05,
"loss": 0.7574,
"step": 15
},
{
"epoch": 0.09523809523809523,
"grad_norm": 0.25597596168518066,
"learning_rate": 9.090909090909092e-05,
"loss": 0.7647,
"step": 16
},
{
"epoch": 0.10119047619047619,
"grad_norm": 0.22872565686702728,
"learning_rate": 9.696969696969698e-05,
"loss": 0.729,
"step": 17
},
{
"epoch": 0.10714285714285714,
"grad_norm": 0.1873745173215866,
"learning_rate": 0.00010303030303030303,
"loss": 0.714,
"step": 18
},
{
"epoch": 0.1130952380952381,
"grad_norm": 0.18152189254760742,
"learning_rate": 0.00010909090909090909,
"loss": 0.754,
"step": 19
},
{
"epoch": 0.11904761904761904,
"grad_norm": 0.21586984395980835,
"learning_rate": 0.00011515151515151516,
"loss": 0.7352,
"step": 20
},
{
"epoch": 0.125,
"grad_norm": 0.2593728303909302,
"learning_rate": 0.00012121212121212122,
"loss": 0.7023,
"step": 21
},
{
"epoch": 0.13095238095238096,
"grad_norm": 0.17135585844516754,
"learning_rate": 0.00012727272727272728,
"loss": 0.7272,
"step": 22
},
{
"epoch": 0.13690476190476192,
"grad_norm": 0.1786317229270935,
"learning_rate": 0.00013333333333333334,
"loss": 0.7325,
"step": 23
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.1994478851556778,
"learning_rate": 0.0001393939393939394,
"loss": 0.735,
"step": 24
},
{
"epoch": 0.1488095238095238,
"grad_norm": 0.16063907742500305,
"learning_rate": 0.00014545454545454546,
"loss": 0.7135,
"step": 25
},
{
"epoch": 0.15476190476190477,
"grad_norm": 0.15729865431785583,
"learning_rate": 0.00015151515151515152,
"loss": 0.7008,
"step": 26
},
{
"epoch": 0.16071428571428573,
"grad_norm": 0.13310623168945312,
"learning_rate": 0.00015757575757575757,
"loss": 0.7205,
"step": 27
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.17777495086193085,
"learning_rate": 0.00016363636363636366,
"loss": 0.7139,
"step": 28
},
{
"epoch": 0.17261904761904762,
"grad_norm": 0.1588098555803299,
"learning_rate": 0.00016969696969696972,
"loss": 0.7084,
"step": 29
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.22337716817855835,
"learning_rate": 0.00017575757575757578,
"loss": 0.716,
"step": 30
},
{
"epoch": 0.18452380952380953,
"grad_norm": 0.1471233069896698,
"learning_rate": 0.00018181818181818183,
"loss": 0.6875,
"step": 31
},
{
"epoch": 0.19047619047619047,
"grad_norm": 0.26265543699264526,
"learning_rate": 0.0001878787878787879,
"loss": 0.7153,
"step": 32
},
{
"epoch": 0.19642857142857142,
"grad_norm": 0.1488102227449417,
"learning_rate": 0.00019393939393939395,
"loss": 0.7085,
"step": 33
},
{
"epoch": 0.20238095238095238,
"grad_norm": 0.13356836140155792,
"learning_rate": 0.0002,
"loss": 0.6814,
"step": 34
},
{
"epoch": 0.20833333333333334,
"grad_norm": 0.15331007540225983,
"learning_rate": 0.00019999462497359466,
"loss": 0.6977,
"step": 35
},
{
"epoch": 0.21428571428571427,
"grad_norm": 0.13369083404541016,
"learning_rate": 0.0001999785004721968,
"loss": 0.6935,
"step": 36
},
{
"epoch": 0.22023809523809523,
"grad_norm": 0.1226360872387886,
"learning_rate": 0.00019995162822919883,
"loss": 0.6949,
"step": 37
},
{
"epoch": 0.2261904761904762,
"grad_norm": 0.13010923564434052,
"learning_rate": 0.00019991401113338104,
"loss": 0.6961,
"step": 38
},
{
"epoch": 0.23214285714285715,
"grad_norm": 0.1279260516166687,
"learning_rate": 0.00019986565322860115,
"loss": 0.7031,
"step": 39
},
{
"epoch": 0.23809523809523808,
"grad_norm": 0.13481521606445312,
"learning_rate": 0.00019980655971335945,
"loss": 0.7104,
"step": 40
},
{
"epoch": 0.24404761904761904,
"grad_norm": 0.12825317680835724,
"learning_rate": 0.00019973673694024,
"loss": 0.6988,
"step": 41
},
{
"epoch": 0.25,
"grad_norm": 0.12685425579547882,
"learning_rate": 0.0001996561924152278,
"loss": 0.6881,
"step": 42
},
{
"epoch": 0.25595238095238093,
"grad_norm": 0.12878207862377167,
"learning_rate": 0.0001995649347969019,
"loss": 0.7119,
"step": 43
},
{
"epoch": 0.2619047619047619,
"grad_norm": 0.14662198722362518,
"learning_rate": 0.00019946297389550433,
"loss": 0.6934,
"step": 44
},
{
"epoch": 0.26785714285714285,
"grad_norm": 0.12851360440254211,
"learning_rate": 0.0001993503206718859,
"loss": 0.6927,
"step": 45
},
{
"epoch": 0.27380952380952384,
"grad_norm": 0.12815622985363007,
"learning_rate": 0.00019922698723632767,
"loss": 0.6959,
"step": 46
},
{
"epoch": 0.27976190476190477,
"grad_norm": 0.13864649832248688,
"learning_rate": 0.00019909298684723904,
"loss": 0.6948,
"step": 47
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.13679292798042297,
"learning_rate": 0.00019894833390973266,
"loss": 0.6738,
"step": 48
},
{
"epoch": 0.2916666666666667,
"grad_norm": 0.13636942207813263,
"learning_rate": 0.0001987930439740757,
"loss": 0.6801,
"step": 49
},
{
"epoch": 0.2976190476190476,
"grad_norm": 0.1284656971693039,
"learning_rate": 0.0001986271337340182,
"loss": 0.6966,
"step": 50
},
{
"epoch": 0.30357142857142855,
"grad_norm": 0.14213284850120544,
"learning_rate": 0.0001984506210249986,
"loss": 0.7011,
"step": 51
},
{
"epoch": 0.30952380952380953,
"grad_norm": 0.1259792149066925,
"learning_rate": 0.00019826352482222638,
"loss": 0.6949,
"step": 52
},
{
"epoch": 0.31547619047619047,
"grad_norm": 0.14440137147903442,
"learning_rate": 0.0001980658652386421,
"loss": 0.6697,
"step": 53
},
{
"epoch": 0.32142857142857145,
"grad_norm": 0.13951236009597778,
"learning_rate": 0.00019785766352275542,
"loss": 0.6847,
"step": 54
},
{
"epoch": 0.3273809523809524,
"grad_norm": 0.14412100613117218,
"learning_rate": 0.00019763894205636072,
"loss": 0.6996,
"step": 55
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.1487567126750946,
"learning_rate": 0.00019740972435213115,
"loss": 0.6855,
"step": 56
},
{
"epoch": 0.3392857142857143,
"grad_norm": 0.1544160693883896,
"learning_rate": 0.00019717003505109095,
"loss": 0.6944,
"step": 57
},
{
"epoch": 0.34523809523809523,
"grad_norm": null,
"learning_rate": 0.00019691989991996663,
"loss": 0.6903,
"step": 58
},
{
"epoch": 0.35119047619047616,
"grad_norm": 0.16976267099380493,
"learning_rate": 0.00019665934584841682,
"loss": 0.6904,
"step": 59
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.9541064500808716,
"learning_rate": 0.00019638840084614182,
"loss": 0.7448,
"step": 60
},
{
"epoch": 0.3630952380952381,
"grad_norm": null,
"learning_rate": 0.00019610709403987246,
"loss": 0.7507,
"step": 61
},
{
"epoch": 0.36904761904761907,
"grad_norm": null,
"learning_rate": 0.000195815455670239,
"loss": 0.7467,
"step": 62
},
{
"epoch": 0.375,
"grad_norm": 0.7270437479019165,
"learning_rate": 0.0001955135170885202,
"loss": 0.7404,
"step": 63
},
{
"epoch": 0.38095238095238093,
"grad_norm": null,
"learning_rate": 0.00019520131075327298,
"loss": 0.7636,
"step": 64
},
{
"epoch": 0.3869047619047619,
"grad_norm": 0.9643076658248901,
"learning_rate": 0.00019487887022684336,
"loss": 0.7888,
"step": 65
},
{
"epoch": 0.39285714285714285,
"grad_norm": 0.6570881605148315,
"learning_rate": 0.00019454623017175812,
"loss": 0.7447,
"step": 66
},
{
"epoch": 0.39880952380952384,
"grad_norm": null,
"learning_rate": 0.0001942034263469989,
"loss": 0.6974,
"step": 67
},
{
"epoch": 0.40476190476190477,
"grad_norm": 0.27752581238746643,
"learning_rate": 0.00019385049560415794,
"loss": 0.7294,
"step": 68
},
{
"epoch": 0.4107142857142857,
"grad_norm": null,
"learning_rate": 0.00019348747588347637,
"loss": 0.7808,
"step": 69
},
{
"epoch": 0.4166666666666667,
"grad_norm": null,
"learning_rate": 0.00019311440620976597,
"loss": 0.7768,
"step": 70
},
{
"epoch": 0.4226190476190476,
"grad_norm": null,
"learning_rate": 0.00019273132668821364,
"loss": 0.7695,
"step": 71
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.7289093136787415,
"learning_rate": 0.00019233827850007027,
"loss": 0.7551,
"step": 72
},
{
"epoch": 0.43452380952380953,
"grad_norm": null,
"learning_rate": 0.00019193530389822363,
"loss": 0.7231,
"step": 73
},
{
"epoch": 0.44047619047619047,
"grad_norm": 0.47448015213012695,
"learning_rate": 0.0001915224462026563,
"loss": 0.715,
"step": 74
},
{
"epoch": 0.44642857142857145,
"grad_norm": null,
"learning_rate": 0.0001910997497957885,
"loss": 0.7635,
"step": 75
},
{
"epoch": 0.4523809523809524,
"grad_norm": 0.730553150177002,
"learning_rate": 0.00019066726011770726,
"loss": 0.7343,
"step": 76
},
{
"epoch": 0.4583333333333333,
"grad_norm": 0.5381748080253601,
"learning_rate": 0.00019022502366128135,
"loss": 0.7316,
"step": 77
},
{
"epoch": 0.4642857142857143,
"grad_norm": null,
"learning_rate": 0.0001897730879671634,
"loss": 0.7176,
"step": 78
},
{
"epoch": 0.47023809523809523,
"grad_norm": null,
"learning_rate": 0.00018931150161867916,
"loss": 0.6801,
"step": 79
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.2901756167411804,
"learning_rate": 0.0001888403142366049,
"loss": 0.7292,
"step": 80
},
{
"epoch": 0.48214285714285715,
"grad_norm": 0.8726024031639099,
"learning_rate": 0.00018835957647383303,
"loss": 0.8004,
"step": 81
},
{
"epoch": 0.4880952380952381,
"grad_norm": null,
"learning_rate": 0.00018786934000992688,
"loss": 0.7288,
"step": 82
},
{
"epoch": 0.49404761904761907,
"grad_norm": null,
"learning_rate": 0.00018736965754556528,
"loss": 0.7356,
"step": 83
},
{
"epoch": 0.5,
"grad_norm": 0.3493836224079132,
"learning_rate": 0.00018686058279687698,
"loss": 0.7117,
"step": 84
},
{
"epoch": 0.5059523809523809,
"grad_norm": 0.8651177883148193,
"learning_rate": 0.00018634217048966637,
"loss": 0.7657,
"step": 85
},
{
"epoch": 0.5119047619047619,
"grad_norm": 0.29697635769844055,
"learning_rate": 0.0001858144763535302,
"loss": 0.7119,
"step": 86
},
{
"epoch": 0.5178571428571429,
"grad_norm": null,
"learning_rate": 0.00018527755711586678,
"loss": 0.7415,
"step": 87
},
{
"epoch": 0.5238095238095238,
"grad_norm": 0.4528316557407379,
"learning_rate": 0.00018473147049577774,
"loss": 0.7175,
"step": 88
},
{
"epoch": 0.5297619047619048,
"grad_norm": null,
"learning_rate": 0.00018417627519786315,
"loss": 0.7537,
"step": 89
},
{
"epoch": 0.5357142857142857,
"grad_norm": null,
"learning_rate": 0.00018361203090591071,
"loss": 0.7542,
"step": 90
},
{
"epoch": 0.5416666666666666,
"grad_norm": null,
"learning_rate": 0.00018303879827647975,
"loss": 0.7518,
"step": 91
},
{
"epoch": 0.5476190476190477,
"grad_norm": 0.6303791403770447,
"learning_rate": 0.00018245663893238075,
"loss": 0.7584,
"step": 92
},
{
"epoch": 0.5535714285714286,
"grad_norm": null,
"learning_rate": 0.00018186561545605054,
"loss": 0.7056,
"step": 93
},
{
"epoch": 0.5595238095238095,
"grad_norm": 0.4130224287509918,
"learning_rate": 0.00018126579138282503,
"loss": 0.7339,
"step": 94
},
{
"epoch": 0.5654761904761905,
"grad_norm": null,
"learning_rate": 0.00018065723119410884,
"loss": 0.7293,
"step": 95
},
{
"epoch": 0.5714285714285714,
"grad_norm": null,
"learning_rate": 0.0001800400003104436,
"loss": 0.7371,
"step": 96
},
{
"epoch": 0.5773809523809523,
"grad_norm": null,
"learning_rate": 0.00017941416508447536,
"loss": 0.7338,
"step": 97
},
{
"epoch": 0.5833333333333334,
"grad_norm": 0.5456637144088745,
"learning_rate": 0.00017877979279382135,
"loss": 0.7373,
"step": 98
},
{
"epoch": 0.5892857142857143,
"grad_norm": 0.9624040722846985,
"learning_rate": 0.0001781369516338378,
"loss": 0.7221,
"step": 99
},
{
"epoch": 0.5952380952380952,
"grad_norm": null,
"learning_rate": 0.000177485710710289,
"loss": 0.6884,
"step": 100
},
{
"epoch": 0.6011904761904762,
"grad_norm": null,
"learning_rate": 0.00017682614003191807,
"loss": 0.6891,
"step": 101
},
{
"epoch": 0.6071428571428571,
"grad_norm": null,
"learning_rate": 0.0001761583105029213,
"loss": 0.6814,
"step": 102
},
{
"epoch": 0.6130952380952381,
"grad_norm": null,
"learning_rate": 0.00017548229391532572,
"loss": 0.728,
"step": 103
},
{
"epoch": 0.6190476190476191,
"grad_norm": null,
"learning_rate": 0.00017479816294127152,
"loss": 0.7014,
"step": 104
},
{
"epoch": 0.625,
"grad_norm": 0.18876609206199646,
"learning_rate": 0.0001741059911251997,
"loss": 0.6979,
"step": 105
},
{
"epoch": 0.6309523809523809,
"grad_norm": 0.8331655859947205,
"learning_rate": 0.00017340585287594604,
"loss": 0.8229,
"step": 106
},
{
"epoch": 0.6369047619047619,
"grad_norm": 0.3347514271736145,
"learning_rate": 0.00017269782345874203,
"loss": 0.6952,
"step": 107
},
{
"epoch": 0.6428571428571429,
"grad_norm": 0.3232339024543762,
"learning_rate": 0.00017198197898712404,
"loss": 0.7005,
"step": 108
},
{
"epoch": 0.6488095238095238,
"grad_norm": 0.47910553216934204,
"learning_rate": 0.00017125839641475072,
"loss": 0.7186,
"step": 109
},
{
"epoch": 0.6547619047619048,
"grad_norm": 0.5297046303749084,
"learning_rate": 0.00017052715352713075,
"loss": 0.7412,
"step": 110
},
{
"epoch": 0.6607142857142857,
"grad_norm": 0.27944743633270264,
"learning_rate": 0.00016978832893326074,
"loss": 0.729,
"step": 111
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.29528820514678955,
"learning_rate": 0.0001690420020571747,
"loss": 0.7299,
"step": 112
},
{
"epoch": 0.6726190476190477,
"grad_norm": 0.2554149925708771,
"learning_rate": 0.00016828825312940592,
"loss": 0.7413,
"step": 113
},
{
"epoch": 0.6785714285714286,
"grad_norm": 0.20665590465068817,
"learning_rate": 0.00016752716317836229,
"loss": 0.7038,
"step": 114
},
{
"epoch": 0.6845238095238095,
"grad_norm": 0.1837390661239624,
"learning_rate": 0.00016675881402161536,
"loss": 0.7124,
"step": 115
},
{
"epoch": 0.6904761904761905,
"grad_norm": 0.16445870697498322,
"learning_rate": 0.00016598328825710533,
"loss": 0.7087,
"step": 116
},
{
"epoch": 0.6964285714285714,
"grad_norm": 0.16310396790504456,
"learning_rate": 0.00016520066925426144,
"loss": 0.7202,
"step": 117
},
{
"epoch": 0.7023809523809523,
"grad_norm": 0.31951698660850525,
"learning_rate": 0.0001644110411450398,
"loss": 0.7107,
"step": 118
},
{
"epoch": 0.7083333333333334,
"grad_norm": 0.16259659826755524,
"learning_rate": 0.00016361448881487914,
"loss": 0.735,
"step": 119
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.15429353713989258,
"learning_rate": 0.0001628110978935756,
"loss": 0.6837,
"step": 120
},
{
"epoch": 0.7202380952380952,
"grad_norm": 0.1663048267364502,
"learning_rate": 0.00016200095474607753,
"loss": 0.6939,
"step": 121
},
{
"epoch": 0.7261904761904762,
"grad_norm": 0.23671415448188782,
"learning_rate": 0.0001611841464632011,
"loss": 0.7126,
"step": 122
},
{
"epoch": 0.7321428571428571,
"grad_norm": 0.13842295110225677,
"learning_rate": 0.00016036076085226814,
"loss": 0.7114,
"step": 123
},
{
"epoch": 0.7380952380952381,
"grad_norm": 0.1538868099451065,
"learning_rate": 0.0001595308864276666,
"loss": 0.7297,
"step": 124
},
{
"epoch": 0.7440476190476191,
"grad_norm": 0.14543598890304565,
"learning_rate": 0.0001586946124013354,
"loss": 0.6717,
"step": 125
},
{
"epoch": 0.75,
"grad_norm": 0.29232776165008545,
"learning_rate": 0.00015785202867317407,
"loss": 0.6727,
"step": 126
},
{
"epoch": 0.7559523809523809,
"grad_norm": 0.15757755935192108,
"learning_rate": 0.00015700322582137827,
"loss": 0.7004,
"step": 127
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.13468873500823975,
"learning_rate": 0.0001561482950927029,
"loss": 0.7022,
"step": 128
},
{
"epoch": 0.7678571428571429,
"grad_norm": 0.15298475325107574,
"learning_rate": 0.00015528732839265272,
"loss": 0.7037,
"step": 129
},
{
"epoch": 0.7738095238095238,
"grad_norm": 0.13249006867408752,
"learning_rate": 0.00015442041827560274,
"loss": 0.6854,
"step": 130
},
{
"epoch": 0.7797619047619048,
"grad_norm": 0.134177565574646,
"learning_rate": 0.00015354765793484834,
"loss": 0.6762,
"step": 131
},
{
"epoch": 0.7857142857142857,
"grad_norm": 0.12376086413860321,
"learning_rate": 0.000152669141192587,
"loss": 0.678,
"step": 132
},
{
"epoch": 0.7916666666666666,
"grad_norm": 0.12841054797172546,
"learning_rate": 0.00015178496248983254,
"loss": 0.6848,
"step": 133
},
{
"epoch": 0.7976190476190477,
"grad_norm": 0.14284221827983856,
"learning_rate": 0.00015089521687626243,
"loss": 0.6801,
"step": 134
},
{
"epoch": 0.8035714285714286,
"grad_norm": 0.16310954093933105,
"learning_rate": 0.00015000000000000001,
"loss": 0.6784,
"step": 135
},
{
"epoch": 0.8095238095238095,
"grad_norm": 0.31779271364212036,
"learning_rate": 0.00014909940809733222,
"loss": 0.7084,
"step": 136
},
{
"epoch": 0.8154761904761905,
"grad_norm": 0.2745242416858673,
"learning_rate": 0.00014819353798236427,
"loss": 0.6729,
"step": 137
},
{
"epoch": 0.8214285714285714,
"grad_norm": null,
"learning_rate": 0.00014728248703661182,
"loss": 0.686,
"step": 138
},
{
"epoch": 0.8273809523809523,
"grad_norm": null,
"learning_rate": 0.00014636635319853275,
"loss": 0.6863,
"step": 139
},
{
"epoch": 0.8333333333333334,
"grad_norm": null,
"learning_rate": 0.00014544523495299842,
"loss": 0.6856,
"step": 140
},
{
"epoch": 0.8392857142857143,
"grad_norm": 0.13249583542346954,
"learning_rate": 0.0001445192313207067,
"loss": 0.7115,
"step": 141
},
{
"epoch": 0.8452380952380952,
"grad_norm": 0.9579920768737793,
"learning_rate": 0.00014358844184753712,
"loss": 0.758,
"step": 142
},
{
"epoch": 0.8511904761904762,
"grad_norm": 0.8529394865036011,
"learning_rate": 0.00014265296659384956,
"loss": 0.794,
"step": 143
},
{
"epoch": 0.8571428571428571,
"grad_norm": null,
"learning_rate": 0.0001417129061237278,
"loss": 0.7051,
"step": 144
},
{
"epoch": 0.8630952380952381,
"grad_norm": 0.3197040259838104,
"learning_rate": 0.00014076836149416887,
"loss": 0.7122,
"step": 145
},
{
"epoch": 0.8690476190476191,
"grad_norm": 0.6125820279121399,
"learning_rate": 0.00013981943424421932,
"loss": 0.7432,
"step": 146
},
{
"epoch": 0.875,
"grad_norm": 0.37258052825927734,
"learning_rate": 0.00013886622638405952,
"loss": 0.7193,
"step": 147
},
{
"epoch": 0.8809523809523809,
"grad_norm": null,
"learning_rate": 0.00013790884038403795,
"loss": 0.7056,
"step": 148
},
{
"epoch": 0.8869047619047619,
"grad_norm": null,
"learning_rate": 0.00013694737916365517,
"loss": 0.7286,
"step": 149
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.41245830059051514,
"learning_rate": 0.0001359819460805001,
"loss": 0.7262,
"step": 150
},
{
"epoch": 0.8988095238095238,
"grad_norm": null,
"learning_rate": 0.00013501264491913906,
"loss": 0.7652,
"step": 151
},
{
"epoch": 0.9047619047619048,
"grad_norm": 0.8128747344017029,
"learning_rate": 0.00013403957987995882,
"loss": 0.7519,
"step": 152
},
{
"epoch": 0.9107142857142857,
"grad_norm": null,
"learning_rate": 0.00013306285556796495,
"loss": 0.7252,
"step": 153
},
{
"epoch": 0.9166666666666666,
"grad_norm": null,
"learning_rate": 0.00013208257698153677,
"loss": 0.7274,
"step": 154
},
{
"epoch": 0.9226190476190477,
"grad_norm": null,
"learning_rate": 0.00013109884950114007,
"loss": 0.7298,
"step": 155
},
{
"epoch": 0.9285714285714286,
"grad_norm": null,
"learning_rate": 0.00013011177887799845,
"loss": 0.7546,
"step": 156
},
{
"epoch": 0.9345238095238095,
"grad_norm": null,
"learning_rate": 0.00012912147122272523,
"loss": 0.7247,
"step": 157
},
{
"epoch": 0.9404761904761905,
"grad_norm": null,
"learning_rate": 0.00012812803299391628,
"loss": 0.7357,
"step": 158
},
{
"epoch": 0.9464285714285714,
"grad_norm": null,
"learning_rate": 0.0001271315709867059,
"loss": 0.7334,
"step": 159
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.3542296588420868,
"learning_rate": 0.00012613219232128608,
"loss": 0.7322,
"step": 160
},
{
"epoch": 0.9583333333333334,
"grad_norm": null,
"learning_rate": 0.00012513000443139112,
"loss": 0.7323,
"step": 161
},
{
"epoch": 0.9642857142857143,
"grad_norm": null,
"learning_rate": 0.00012412511505274844,
"loss": 0.7083,
"step": 162
},
{
"epoch": 0.9702380952380952,
"grad_norm": null,
"learning_rate": 0.000123117632211497,
"loss": 0.7315,
"step": 163
},
{
"epoch": 0.9761904761904762,
"grad_norm": null,
"learning_rate": 0.0001221076642125742,
"loss": 0.7154,
"step": 164
},
{
"epoch": 0.9821428571428571,
"grad_norm": null,
"learning_rate": 0.00012109531962807332,
"loss": 0.733,
"step": 165
},
{
"epoch": 0.9880952380952381,
"grad_norm": 0.4259299337863922,
"learning_rate": 0.00012008070728557186,
"loss": 0.726,
"step": 166
},
{
"epoch": 0.9940476190476191,
"grad_norm": null,
"learning_rate": 0.00011906393625643244,
"loss": 0.7351,
"step": 167
},
{
"epoch": 1.0,
"grad_norm": null,
"learning_rate": 0.00011804511584407763,
"loss": 0.7286,
"step": 168
},
{
"epoch": 1.0,
"eval_loss": 0.5009281635284424,
"eval_runtime": 1968.0548,
"eval_samples_per_second": 12.245,
"eval_steps_per_second": 0.096,
"step": 168
}
],
"logging_steps": 1,
"max_steps": 336,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.4143922966152348e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}