{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9890909090909092,
"eval_steps": 500,
"global_step": 153,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019393939393939394,
"grad_norm": 87.14871215820312,
"learning_rate": 0.0,
"loss": 2.7491,
"step": 1
},
{
"epoch": 0.03878787878787879,
"grad_norm": 88.4898452758789,
"learning_rate": 4e-05,
"loss": 2.7719,
"step": 2
},
{
"epoch": 0.05818181818181818,
"grad_norm": 55.55705642700195,
"learning_rate": 8e-05,
"loss": 1.3445,
"step": 3
},
{
"epoch": 0.07757575757575758,
"grad_norm": 68.65280151367188,
"learning_rate": 0.00012,
"loss": 2.8264,
"step": 4
},
{
"epoch": 0.09696969696969697,
"grad_norm": 60.984527587890625,
"learning_rate": 0.00016,
"loss": 1.2853,
"step": 5
},
{
"epoch": 0.11636363636363636,
"grad_norm": 79.5303955078125,
"learning_rate": 0.0002,
"loss": 1.5542,
"step": 6
},
{
"epoch": 0.13575757575757577,
"grad_norm": 122.48136138916016,
"learning_rate": 0.00019997747161747695,
"loss": 4.8128,
"step": 7
},
{
"epoch": 0.15515151515151515,
"grad_norm": 115.57413482666016,
"learning_rate": 0.00019990989662046818,
"loss": 7.7786,
"step": 8
},
{
"epoch": 0.17454545454545456,
"grad_norm": 106.0721664428711,
"learning_rate": 0.00019979730545608126,
"loss": 14.3271,
"step": 9
},
{
"epoch": 0.19393939393939394,
"grad_norm": 187.45887756347656,
"learning_rate": 0.00019963974885425266,
"loss": 10.5438,
"step": 10
},
{
"epoch": 0.21333333333333335,
"grad_norm": 67.8653564453125,
"learning_rate": 0.00019943729780489027,
"loss": 8.8936,
"step": 11
},
{
"epoch": 0.23272727272727273,
"grad_norm": 53.751426696777344,
"learning_rate": 0.00019919004352588767,
"loss": 6.9044,
"step": 12
},
{
"epoch": 0.25212121212121213,
"grad_norm": 68.24635314941406,
"learning_rate": 0.00019889809742202455,
"loss": 6.6015,
"step": 13
},
{
"epoch": 0.27151515151515154,
"grad_norm": 49.91704559326172,
"learning_rate": 0.00019856159103477086,
"loss": 4.7958,
"step": 14
},
{
"epoch": 0.2909090909090909,
"grad_norm": 84.79557800292969,
"learning_rate": 0.0001981806759830189,
"loss": 4.4889,
"step": 15
},
{
"epoch": 0.3103030303030303,
"grad_norm": 42.62947463989258,
"learning_rate": 0.00019775552389476864,
"loss": 3.852,
"step": 16
},
{
"epoch": 0.3296969696969697,
"grad_norm": 56.70987319946289,
"learning_rate": 0.00019728632632979746,
"loss": 3.7326,
"step": 17
},
{
"epoch": 0.3490909090909091,
"grad_norm": 48.838932037353516,
"learning_rate": 0.0001967732946933499,
"loss": 3.5072,
"step": 18
},
{
"epoch": 0.36848484848484847,
"grad_norm": 36.361454010009766,
"learning_rate": 0.00019621666014088494,
"loss": 3.3605,
"step": 19
},
{
"epoch": 0.3878787878787879,
"grad_norm": 42.21967697143555,
"learning_rate": 0.00019561667347392508,
"loss": 3.405,
"step": 20
},
{
"epoch": 0.4072727272727273,
"grad_norm": 38.328067779541016,
"learning_rate": 0.0001949736050270532,
"loss": 3.2796,
"step": 21
},
{
"epoch": 0.4266666666666667,
"grad_norm": 35.21055221557617,
"learning_rate": 0.00019428774454610843,
"loss": 3.1108,
"step": 22
},
{
"epoch": 0.44606060606060605,
"grad_norm": 36.039955139160156,
"learning_rate": 0.0001935594010576362,
"loss": 2.989,
"step": 23
},
{
"epoch": 0.46545454545454545,
"grad_norm": 46.623226165771484,
"learning_rate": 0.00019278890272965096,
"loss": 2.9046,
"step": 24
},
{
"epoch": 0.48484848484848486,
"grad_norm": 42.583335876464844,
"learning_rate": 0.0001919765967237739,
"loss": 2.7366,
"step": 25
},
{
"epoch": 0.5042424242424243,
"grad_norm": 27.625574111938477,
"learning_rate": 0.0001911228490388136,
"loss": 2.538,
"step": 26
},
{
"epoch": 0.5236363636363637,
"grad_norm": 22.885229110717773,
"learning_rate": 0.00019022804434585852,
"loss": 2.4008,
"step": 27
},
{
"epoch": 0.5430303030303031,
"grad_norm": 25.352842330932617,
"learning_rate": 0.00018929258581495685,
"loss": 2.2888,
"step": 28
},
{
"epoch": 0.5624242424242424,
"grad_norm": 25.622007369995117,
"learning_rate": 0.00018831689493346095,
"loss": 2.1831,
"step": 29
},
{
"epoch": 0.5818181818181818,
"grad_norm": 24.7369327545166,
"learning_rate": 0.00018730141131611882,
"loss": 2.0357,
"step": 30
},
{
"epoch": 0.6012121212121212,
"grad_norm": 24.530712127685547,
"learning_rate": 0.00018624659250699805,
"loss": 1.91,
"step": 31
},
{
"epoch": 0.6206060606060606,
"grad_norm": 21.335567474365234,
"learning_rate": 0.00018515291377333112,
"loss": 1.7888,
"step": 32
},
{
"epoch": 0.64,
"grad_norm": 21.168838500976562,
"learning_rate": 0.00018402086789137546,
"loss": 1.6856,
"step": 33
},
{
"epoch": 0.6593939393939394,
"grad_norm": 22.99504280090332,
"learning_rate": 0.00018285096492438424,
"loss": 1.5913,
"step": 34
},
{
"epoch": 0.6787878787878788,
"grad_norm": 19.430891036987305,
"learning_rate": 0.00018164373199278856,
"loss": 1.5296,
"step": 35
},
{
"epoch": 0.6981818181818182,
"grad_norm": 17.32403564453125,
"learning_rate": 0.00018039971303669407,
"loss": 1.4921,
"step": 36
},
{
"epoch": 0.7175757575757575,
"grad_norm": 13.494332313537598,
"learning_rate": 0.00017911946857079888,
"loss": 1.4273,
"step": 37
},
{
"epoch": 0.7369696969696969,
"grad_norm": 13.716304779052734,
"learning_rate": 0.00017780357543184397,
"loss": 1.3661,
"step": 38
},
{
"epoch": 0.7563636363636363,
"grad_norm": 18.049741744995117,
"learning_rate": 0.00017645262651870926,
"loss": 1.3225,
"step": 39
},
{
"epoch": 0.7757575757575758,
"grad_norm": 12.06884765625,
"learning_rate": 0.00017506723052527242,
"loss": 1.2614,
"step": 40
},
{
"epoch": 0.7951515151515152,
"grad_norm": 10.950291633605957,
"learning_rate": 0.00017364801166615124,
"loss": 1.2235,
"step": 41
},
{
"epoch": 0.8145454545454546,
"grad_norm": 10.462716102600098,
"learning_rate": 0.00017219560939545246,
"loss": 1.1887,
"step": 42
},
{
"epoch": 0.833939393939394,
"grad_norm": 14.221378326416016,
"learning_rate": 0.00017071067811865476,
"loss": 1.1758,
"step": 43
},
{
"epoch": 0.8533333333333334,
"grad_norm": 9.887701034545898,
"learning_rate": 0.00016919388689775464,
"loss": 1.1214,
"step": 44
},
{
"epoch": 0.8727272727272727,
"grad_norm": 13.127241134643555,
"learning_rate": 0.0001676459191498087,
"loss": 1.0835,
"step": 45
},
{
"epoch": 0.8921212121212121,
"grad_norm": 16.6353816986084,
"learning_rate": 0.00016606747233900815,
"loss": 1.0416,
"step": 46
},
{
"epoch": 0.9115151515151515,
"grad_norm": 14.955666542053223,
"learning_rate": 0.00016445925766242391,
"loss": 1.0035,
"step": 47
},
{
"epoch": 0.9309090909090909,
"grad_norm": 9.893986701965332,
"learning_rate": 0.00016282199972956425,
"loss": 0.9478,
"step": 48
},
{
"epoch": 0.9503030303030303,
"grad_norm": 7.6303606033325195,
"learning_rate": 0.00016115643623588915,
"loss": 0.9117,
"step": 49
},
{
"epoch": 0.9696969696969697,
"grad_norm": 6.176935195922852,
"learning_rate": 0.00015946331763042867,
"loss": 0.8864,
"step": 50
},
{
"epoch": 0.9890909090909091,
"grad_norm": 5.813427448272705,
"learning_rate": 0.0001577434067776548,
"loss": 0.8923,
"step": 51
},
{
"epoch": 1.0193939393939393,
"grad_norm": 5.501270294189453,
"learning_rate": 0.00015599747861375955,
"loss": 1.1926,
"step": 52
},
{
"epoch": 1.0387878787878788,
"grad_norm": 6.7219743728637695,
"learning_rate": 0.00015422631979749354,
"loss": 0.8721,
"step": 53
},
{
"epoch": 1.0581818181818181,
"grad_norm": 5.997743606567383,
"learning_rate": 0.00015243072835572318,
"loss": 0.8581,
"step": 54
},
{
"epoch": 1.0775757575757576,
"grad_norm": 6.01072359085083,
"learning_rate": 0.00015061151332386566,
"loss": 0.8375,
"step": 55
},
{
"epoch": 1.096969696969697,
"grad_norm": 7.074451923370361,
"learning_rate": 0.00014876949438136347,
"loss": 0.8521,
"step": 56
},
{
"epoch": 1.1163636363636364,
"grad_norm": 6.878782272338867,
"learning_rate": 0.0001469055014823637,
"loss": 0.8376,
"step": 57
},
{
"epoch": 1.1357575757575757,
"grad_norm": 7.401325225830078,
"learning_rate": 0.00014502037448176734,
"loss": 0.8258,
"step": 58
},
{
"epoch": 1.1551515151515153,
"grad_norm": 6.986009120941162,
"learning_rate": 0.00014311496275681783,
"loss": 0.8152,
"step": 59
},
{
"epoch": 1.1745454545454546,
"grad_norm": 3.373999834060669,
"learning_rate": 0.0001411901248243993,
"loss": 0.8212,
"step": 60
},
{
"epoch": 1.1939393939393939,
"grad_norm": 4.864870548248291,
"learning_rate": 0.00013924672795421637,
"loss": 0.8272,
"step": 61
},
{
"epoch": 1.2133333333333334,
"grad_norm": 5.978538513183594,
"learning_rate": 0.00013728564777803088,
"loss": 0.8253,
"step": 62
},
{
"epoch": 1.2327272727272727,
"grad_norm": 5.256051540374756,
"learning_rate": 0.0001353077678951301,
"loss": 0.8115,
"step": 63
},
{
"epoch": 1.2521212121212122,
"grad_norm": 4.685987949371338,
"learning_rate": 0.00013331397947420576,
"loss": 0.806,
"step": 64
},
{
"epoch": 1.2715151515151515,
"grad_norm": 5.210947513580322,
"learning_rate": 0.00013130518085182225,
"loss": 0.8114,
"step": 65
},
{
"epoch": 1.290909090909091,
"grad_norm": 7.189337730407715,
"learning_rate": 0.00012928227712765504,
"loss": 0.8136,
"step": 66
},
{
"epoch": 1.3103030303030303,
"grad_norm": 4.788082599639893,
"learning_rate": 0.0001272461797566823,
"loss": 0.798,
"step": 67
},
{
"epoch": 1.3296969696969696,
"grad_norm": 6.347202301025391,
"learning_rate": 0.00012519780613851254,
"loss": 0.8164,
"step": 68
},
{
"epoch": 1.3490909090909091,
"grad_norm": 4.3109307289123535,
"learning_rate": 0.00012313807920403419,
"loss": 0.7968,
"step": 69
},
{
"epoch": 1.3684848484848484,
"grad_norm": 4.202847480773926,
"learning_rate": 0.00012106792699957263,
"loss": 0.8084,
"step": 70
},
{
"epoch": 1.387878787878788,
"grad_norm": 5.670694351196289,
"learning_rate": 0.00011898828226874284,
"loss": 0.8105,
"step": 71
},
{
"epoch": 1.4072727272727272,
"grad_norm": 6.197960376739502,
"learning_rate": 0.00011690008203218493,
"loss": 0.8075,
"step": 72
},
{
"epoch": 1.4266666666666667,
"grad_norm": 5.756638526916504,
"learning_rate": 0.00011480426716537315,
"loss": 0.791,
"step": 73
},
{
"epoch": 1.446060606060606,
"grad_norm": 4.505667209625244,
"learning_rate": 0.00011270178197468789,
"loss": 0.8066,
"step": 74
},
{
"epoch": 1.4654545454545453,
"grad_norm": 4.306074142456055,
"learning_rate": 0.00011059357377194161,
"loss": 0.7946,
"step": 75
},
{
"epoch": 1.4848484848484849,
"grad_norm": 5.666556358337402,
"learning_rate": 0.00010848059244755093,
"loss": 0.8056,
"step": 76
},
{
"epoch": 1.5042424242424244,
"grad_norm": 4.2490315437316895,
"learning_rate": 0.00010636379004254664,
"loss": 0.8098,
"step": 77
},
{
"epoch": 1.5236363636363637,
"grad_norm": 4.633942604064941,
"learning_rate": 0.00010424412031961484,
"loss": 0.8032,
"step": 78
},
{
"epoch": 1.543030303030303,
"grad_norm": 4.449273109436035,
"learning_rate": 0.00010212253833336237,
"loss": 0.7973,
"step": 79
},
{
"epoch": 1.5624242424242425,
"grad_norm": 4.270414352416992,
"learning_rate": 0.0001,
"loss": 0.8055,
"step": 80
},
{
"epoch": 1.5818181818181818,
"grad_norm": 3.798518657684326,
"learning_rate": 9.787746166663764e-05,
"loss": 0.7913,
"step": 81
},
{
"epoch": 1.601212121212121,
"grad_norm": 4.9506425857543945,
"learning_rate": 9.57558796803852e-05,
"loss": 0.7878,
"step": 82
},
{
"epoch": 1.6206060606060606,
"grad_norm": 2.22216534614563,
"learning_rate": 9.363620995745337e-05,
"loss": 0.7962,
"step": 83
},
{
"epoch": 1.6400000000000001,
"grad_norm": 4.047158241271973,
"learning_rate": 9.151940755244912e-05,
"loss": 0.7892,
"step": 84
},
{
"epoch": 1.6593939393939394,
"grad_norm": 3.936147689819336,
"learning_rate": 8.94064262280584e-05,
"loss": 0.7937,
"step": 85
},
{
"epoch": 1.6787878787878787,
"grad_norm": 2.8039066791534424,
"learning_rate": 8.729821802531212e-05,
"loss": 0.7902,
"step": 86
},
{
"epoch": 1.6981818181818182,
"grad_norm": 3.999570608139038,
"learning_rate": 8.519573283462687e-05,
"loss": 0.7902,
"step": 87
},
{
"epoch": 1.7175757575757575,
"grad_norm": 6.398386478424072,
"learning_rate": 8.309991796781511e-05,
"loss": 0.7874,
"step": 88
},
{
"epoch": 1.7369696969696968,
"grad_norm": 3.3710646629333496,
"learning_rate": 8.101171773125716e-05,
"loss": 0.8017,
"step": 89
},
{
"epoch": 1.7563636363636363,
"grad_norm": 4.403554439544678,
"learning_rate": 7.89320730004274e-05,
"loss": 0.7909,
"step": 90
},
{
"epoch": 1.7757575757575759,
"grad_norm": 3.6332623958587646,
"learning_rate": 7.686192079596586e-05,
"loss": 0.7914,
"step": 91
},
{
"epoch": 1.7951515151515152,
"grad_norm": 2.992091655731201,
"learning_rate": 7.48021938614875e-05,
"loss": 0.8005,
"step": 92
},
{
"epoch": 1.8145454545454545,
"grad_norm": 3.749993324279785,
"learning_rate": 7.275382024331772e-05,
"loss": 0.7916,
"step": 93
},
{
"epoch": 1.833939393939394,
"grad_norm": 5.296722888946533,
"learning_rate": 7.071772287234497e-05,
"loss": 0.7968,
"step": 94
},
{
"epoch": 1.8533333333333335,
"grad_norm": 3.761016607284546,
"learning_rate": 6.869481914817779e-05,
"loss": 0.7856,
"step": 95
},
{
"epoch": 1.8727272727272726,
"grad_norm": 2.1738433837890625,
"learning_rate": 6.668602052579424e-05,
"loss": 0.7907,
"step": 96
},
{
"epoch": 1.892121212121212,
"grad_norm": 4.787725448608398,
"learning_rate": 6.469223210486992e-05,
"loss": 0.7969,
"step": 97
},
{
"epoch": 1.9115151515151516,
"grad_norm": 3.5583176612854004,
"learning_rate": 6.271435222196916e-05,
"loss": 0.789,
"step": 98
},
{
"epoch": 1.930909090909091,
"grad_norm": 3.7247867584228516,
"learning_rate": 6.0753272045783625e-05,
"loss": 0.7984,
"step": 99
},
{
"epoch": 1.9503030303030302,
"grad_norm": 3.7892508506774902,
"learning_rate": 5.880987517560075e-05,
"loss": 0.7951,
"step": 100
},
{
"epoch": 1.9696969696969697,
"grad_norm": 3.6910500526428223,
"learning_rate": 5.688503724318217e-05,
"loss": 0.7995,
"step": 101
},
{
"epoch": 1.9890909090909092,
"grad_norm": 3.318086862564087,
"learning_rate": 5.497962551823266e-05,
"loss": 0.7816,
"step": 102
},
{
"epoch": 2.0193939393939395,
"grad_norm": 4.396894454956055,
"learning_rate": 5.309449851763633e-05,
"loss": 1.092,
"step": 103
},
{
"epoch": 2.0387878787878786,
"grad_norm": 2.913602828979492,
"learning_rate": 5.123050561863657e-05,
"loss": 0.7868,
"step": 104
},
{
"epoch": 2.058181818181818,
"grad_norm": 5.820463180541992,
"learning_rate": 4.938848667613436e-05,
"loss": 0.792,
"step": 105
},
{
"epoch": 2.0775757575757576,
"grad_norm": 3.3575682640075684,
"learning_rate": 4.756927164427685e-05,
"loss": 0.7894,
"step": 106
},
{
"epoch": 2.096969696969697,
"grad_norm": 4.6300458908081055,
"learning_rate": 4.57736802025065e-05,
"loss": 0.7882,
"step": 107
},
{
"epoch": 2.1163636363636362,
"grad_norm": 3.036693572998047,
"learning_rate": 4.4002521386240466e-05,
"loss": 0.7934,
"step": 108
},
{
"epoch": 2.1357575757575757,
"grad_norm": 3.7198660373687744,
"learning_rate": 4.2256593222345185e-05,
"loss": 0.7864,
"step": 109
},
{
"epoch": 2.1551515151515153,
"grad_norm": 3.7882423400878906,
"learning_rate": 4.053668236957134e-05,
"loss": 0.785,
"step": 110
},
{
"epoch": 2.174545454545455,
"grad_norm": 2.843419075012207,
"learning_rate": 3.884356376411089e-05,
"loss": 0.7836,
"step": 111
},
{
"epoch": 2.193939393939394,
"grad_norm": 2.6506614685058594,
"learning_rate": 3.717800027043576e-05,
"loss": 0.7894,
"step": 112
},
{
"epoch": 2.2133333333333334,
"grad_norm": 3.587935447692871,
"learning_rate": 3.554074233757608e-05,
"loss": 0.7884,
"step": 113
},
{
"epoch": 2.232727272727273,
"grad_norm": 3.2883381843566895,
"learning_rate": 3.393252766099187e-05,
"loss": 0.7926,
"step": 114
},
{
"epoch": 2.252121212121212,
"grad_norm": 4.512854099273682,
"learning_rate": 3.2354080850191324e-05,
"loss": 0.7839,
"step": 115
},
{
"epoch": 2.2715151515151515,
"grad_norm": 4.273723125457764,
"learning_rate": 3.080611310224539e-05,
"loss": 0.7902,
"step": 116
},
{
"epoch": 2.290909090909091,
"grad_norm": 3.590773820877075,
"learning_rate": 2.9289321881345254e-05,
"loss": 0.7818,
"step": 117
},
{
"epoch": 2.3103030303030305,
"grad_norm": 3.1294124126434326,
"learning_rate": 2.7804390604547557e-05,
"loss": 0.786,
"step": 118
},
{
"epoch": 2.3296969696969696,
"grad_norm": 3.600764751434326,
"learning_rate": 2.6351988333848788e-05,
"loss": 0.7853,
"step": 119
},
{
"epoch": 2.349090909090909,
"grad_norm": 3.205591917037964,
"learning_rate": 2.493276947472756e-05,
"loss": 0.7835,
"step": 120
},
{
"epoch": 2.3684848484848486,
"grad_norm": 2.840078592300415,
"learning_rate": 2.354737348129077e-05,
"loss": 0.785,
"step": 121
},
{
"epoch": 2.3878787878787877,
"grad_norm": 3.47402286529541,
"learning_rate": 2.2196424568156073e-05,
"loss": 0.7822,
"step": 122
},
{
"epoch": 2.4072727272727272,
"grad_norm": 3.246713638305664,
"learning_rate": 2.0880531429201145e-05,
"loss": 0.779,
"step": 123
},
{
"epoch": 2.4266666666666667,
"grad_norm": 3.6749753952026367,
"learning_rate": 1.9600286963305957e-05,
"loss": 0.7974,
"step": 124
},
{
"epoch": 2.4460606060606063,
"grad_norm": 3.423552989959717,
"learning_rate": 1.835626800721144e-05,
"loss": 0.795,
"step": 125
},
{
"epoch": 2.4654545454545453,
"grad_norm": 5.248719215393066,
"learning_rate": 1.7149035075615794e-05,
"loss": 0.7816,
"step": 126
},
{
"epoch": 2.484848484848485,
"grad_norm": 2.561350107192993,
"learning_rate": 1.5979132108624574e-05,
"loss": 0.7886,
"step": 127
},
{
"epoch": 2.5042424242424244,
"grad_norm": 2.6997573375701904,
"learning_rate": 1.4847086226668872e-05,
"loss": 0.788,
"step": 128
},
{
"epoch": 2.5236363636363635,
"grad_norm": 2.400233268737793,
"learning_rate": 1.3753407493001968e-05,
"loss": 0.7785,
"step": 129
},
{
"epoch": 2.543030303030303,
"grad_norm": 4.139961242675781,
"learning_rate": 1.2698588683881186e-05,
"loss": 0.7846,
"step": 130
},
{
"epoch": 2.5624242424242425,
"grad_norm": 3.8779916763305664,
"learning_rate": 1.1683105066539068e-05,
"loss": 0.7877,
"step": 131
},
{
"epoch": 2.581818181818182,
"grad_norm": 3.5718817710876465,
"learning_rate": 1.0707414185043163e-05,
"loss": 0.7755,
"step": 132
},
{
"epoch": 2.601212121212121,
"grad_norm": 2.733354330062866,
"learning_rate": 9.771955654141496e-06,
"loss": 0.7923,
"step": 133
},
{
"epoch": 2.6206060606060606,
"grad_norm": 3.5972788333892822,
"learning_rate": 8.87715096118642e-06,
"loss": 0.7829,
"step": 134
},
{
"epoch": 2.64,
"grad_norm": 2.625112533569336,
"learning_rate": 8.023403276226126e-06,
"loss": 0.7758,
"step": 135
},
{
"epoch": 2.659393939393939,
"grad_norm": 1.7941921949386597,
"learning_rate": 7.211097270349066e-06,
"loss": 0.7896,
"step": 136
},
{
"epoch": 2.6787878787878787,
"grad_norm": 3.4884462356567383,
"learning_rate": 6.440598942363796e-06,
"loss": 0.787,
"step": 137
},
{
"epoch": 2.6981818181818182,
"grad_norm": 3.0694332122802734,
"learning_rate": 5.71225545389158e-06,
"loss": 0.7787,
"step": 138
},
{
"epoch": 2.7175757575757578,
"grad_norm": 3.9330761432647705,
"learning_rate": 5.026394972946813e-06,
"loss": 0.7724,
"step": 139
},
{
"epoch": 2.736969696969697,
"grad_norm": 4.874517917633057,
"learning_rate": 4.383326526074916e-06,
"loss": 0.7737,
"step": 140
},
{
"epoch": 2.7563636363636363,
"grad_norm": 3.3836331367492676,
"learning_rate": 3.783339859115065e-06,
"loss": 0.7892,
"step": 141
},
{
"epoch": 2.775757575757576,
"grad_norm": 3.0882411003112793,
"learning_rate": 3.226705306650113e-06,
"loss": 0.7905,
"step": 142
},
{
"epoch": 2.795151515151515,
"grad_norm": 3.052604913711548,
"learning_rate": 2.7136736702025433e-06,
"loss": 0.7853,
"step": 143
},
{
"epoch": 2.8145454545454545,
"grad_norm": 1.9272351264953613,
"learning_rate": 2.2444761052313856e-06,
"loss": 0.7732,
"step": 144
},
{
"epoch": 2.833939393939394,
"grad_norm": 2.5749495029449463,
"learning_rate": 1.8193240169810943e-06,
"loss": 0.7846,
"step": 145
},
{
"epoch": 2.8533333333333335,
"grad_norm": 1.8724554777145386,
"learning_rate": 1.4384089652291543e-06,
"loss": 0.7803,
"step": 146
},
{
"epoch": 2.8727272727272726,
"grad_norm": 2.784907102584839,
"learning_rate": 1.1019025779754666e-06,
"loss": 0.7838,
"step": 147
},
{
"epoch": 2.892121212121212,
"grad_norm": 4.25577449798584,
"learning_rate": 8.099564741123166e-07,
"loss": 0.7863,
"step": 148
},
{
"epoch": 2.9115151515151516,
"grad_norm": 2.0086898803710938,
"learning_rate": 5.627021951097545e-07,
"loss": 0.7808,
"step": 149
},
{
"epoch": 2.9309090909090907,
"grad_norm": 2.6451563835144043,
"learning_rate": 3.6025114574734785e-07,
"loss": 0.7877,
"step": 150
},
{
"epoch": 2.95030303030303,
"grad_norm": 3.0813286304473877,
"learning_rate": 2.0269454391874666e-07,
"loss": 0.7727,
"step": 151
},
{
"epoch": 2.9696969696969697,
"grad_norm": 2.1434407234191895,
"learning_rate": 9.010337953185843e-08,
"loss": 0.7861,
"step": 152
},
{
"epoch": 2.9890909090909092,
"grad_norm": 2.7659671306610107,
"learning_rate": 2.2528382523057113e-08,
"loss": 0.7782,
"step": 153
},
{
"epoch": 2.9890909090909092,
"step": 153,
"total_flos": 2.4132084696088576e+18,
"train_loss": 1.5338537124247333,
"train_runtime": 26135.9705,
"train_samples_per_second": 0.757,
"train_steps_per_second": 0.006
}
],
"logging_steps": 1.0,
"max_steps": 153,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.4132084696088576e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}