{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 16.93658536585366,
  "eval_steps": 500,
  "global_step": 136,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11707317073170732,
      "grad_norm": 3.068251132965088,
      "learning_rate": 7.142857142857143e-07,
      "loss": 0.7283,
      "step": 1
    },
    {
      "epoch": 0.23414634146341465,
      "grad_norm": 3.0890955924987793,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.7468,
      "step": 2
    },
    {
      "epoch": 0.35121951219512193,
      "grad_norm": 2.848057746887207,
      "learning_rate": 2.1428571428571427e-06,
      "loss": 0.7287,
      "step": 3
    },
    {
      "epoch": 0.4682926829268293,
      "grad_norm": 2.817434549331665,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.7216,
      "step": 4
    },
    {
      "epoch": 0.5853658536585366,
      "grad_norm": 2.3008267879486084,
      "learning_rate": 3.5714285714285718e-06,
      "loss": 0.6971,
      "step": 5
    },
    {
      "epoch": 0.7024390243902439,
      "grad_norm": 2.112135887145996,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.7117,
      "step": 6
    },
    {
      "epoch": 0.8195121951219512,
      "grad_norm": 1.6284232139587402,
      "learning_rate": 5e-06,
      "loss": 0.6701,
      "step": 7
    },
    {
      "epoch": 0.9365853658536586,
      "grad_norm": 1.4377871751785278,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.6756,
      "step": 8
    },
    {
      "epoch": 1.1170731707317074,
      "grad_norm": 2.7016568183898926,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 1.2659,
      "step": 9
    },
    {
      "epoch": 1.2341463414634146,
      "grad_norm": 1.4025566577911377,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.629,
      "step": 10
    },
    {
      "epoch": 1.3512195121951218,
      "grad_norm": 1.0815410614013672,
      "learning_rate": 7.857142857142858e-06,
      "loss": 0.5818,
      "step": 11
    },
    {
      "epoch": 1.4682926829268292,
      "grad_norm": 1.4897761344909668,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.5669,
      "step": 12
    },
    {
      "epoch": 1.5853658536585367,
      "grad_norm": 1.0598845481872559,
      "learning_rate": 9.285714285714288e-06,
      "loss": 0.5562,
      "step": 13
    },
    {
      "epoch": 1.7024390243902439,
      "grad_norm": 0.800104022026062,
      "learning_rate": 1e-05,
      "loss": 0.5349,
      "step": 14
    },
    {
      "epoch": 1.819512195121951,
      "grad_norm": 0.7887536883354187,
      "learning_rate": 9.998342337571566e-06,
      "loss": 0.5225,
      "step": 15
    },
    {
      "epoch": 1.9365853658536585,
      "grad_norm": 1.0338839292526245,
      "learning_rate": 9.993370449424153e-06,
      "loss": 0.5276,
      "step": 16
    },
    {
      "epoch": 2.1170731707317074,
      "grad_norm": 1.6621471643447876,
      "learning_rate": 9.985087632242634e-06,
      "loss": 1.014,
      "step": 17
    },
    {
      "epoch": 2.234146341463415,
      "grad_norm": 0.6436282992362976,
      "learning_rate": 9.973499378072947e-06,
      "loss": 0.4925,
      "step": 18
    },
    {
      "epoch": 2.351219512195122,
      "grad_norm": 0.5632502436637878,
      "learning_rate": 9.958613370680507e-06,
      "loss": 0.4723,
      "step": 19
    },
    {
      "epoch": 2.4682926829268292,
      "grad_norm": 0.5272106528282166,
      "learning_rate": 9.940439480455386e-06,
      "loss": 0.4697,
      "step": 20
    },
    {
      "epoch": 2.5853658536585367,
      "grad_norm": 0.5168065428733826,
      "learning_rate": 9.918989757867584e-06,
      "loss": 0.4653,
      "step": 21
    },
    {
      "epoch": 2.7024390243902436,
      "grad_norm": 0.545330286026001,
      "learning_rate": 9.89427842547679e-06,
      "loss": 0.4437,
      "step": 22
    },
    {
      "epoch": 2.819512195121951,
      "grad_norm": 0.44697803258895874,
      "learning_rate": 9.866321868501914e-06,
      "loss": 0.4645,
      "step": 23
    },
    {
      "epoch": 2.9365853658536585,
      "grad_norm": 0.36716875433921814,
      "learning_rate": 9.835138623956603e-06,
      "loss": 0.4637,
      "step": 24
    },
    {
      "epoch": 3.1170731707317074,
      "grad_norm": 0.8350607752799988,
      "learning_rate": 9.80074936835801e-06,
      "loss": 0.8482,
      "step": 25
    },
    {
      "epoch": 3.234146341463415,
      "grad_norm": 0.34782156348228455,
      "learning_rate": 9.763176904016914e-06,
      "loss": 0.4335,
      "step": 26
    },
    {
      "epoch": 3.351219512195122,
      "grad_norm": 0.38511568307876587,
      "learning_rate": 9.722446143918307e-06,
      "loss": 0.4296,
      "step": 27
    },
    {
      "epoch": 3.4682926829268292,
      "grad_norm": 0.3624374270439148,
      "learning_rate": 9.678584095202468e-06,
      "loss": 0.4104,
      "step": 28
    },
    {
      "epoch": 3.5853658536585367,
      "grad_norm": 0.32947778701782227,
      "learning_rate": 9.631619841257477e-06,
      "loss": 0.3993,
      "step": 29
    },
    {
      "epoch": 3.7024390243902436,
      "grad_norm": 0.34152206778526306,
      "learning_rate": 9.581584522435025e-06,
      "loss": 0.4123,
      "step": 30
    },
    {
      "epoch": 3.819512195121951,
      "grad_norm": 0.36183634400367737,
      "learning_rate": 9.528511315402358e-06,
      "loss": 0.4022,
      "step": 31
    },
    {
      "epoch": 3.9365853658536585,
      "grad_norm": 0.350755512714386,
      "learning_rate": 9.472435411143979e-06,
      "loss": 0.4049,
      "step": 32
    },
    {
      "epoch": 4.117073170731707,
      "grad_norm": 0.6949635744094849,
      "learning_rate": 9.413393991627737e-06,
      "loss": 0.7805,
      "step": 33
    },
    {
      "epoch": 4.234146341463415,
      "grad_norm": 0.28440478444099426,
      "learning_rate": 9.351426205150778e-06,
      "loss": 0.3658,
      "step": 34
    },
    {
      "epoch": 4.351219512195122,
      "grad_norm": 0.35889554023742676,
      "learning_rate": 9.286573140381663e-06,
      "loss": 0.3743,
      "step": 35
    },
    {
      "epoch": 4.46829268292683,
      "grad_norm": 0.3411248028278351,
      "learning_rate": 9.218877799115929e-06,
      "loss": 0.3684,
      "step": 36
    },
    {
      "epoch": 4.585365853658536,
      "grad_norm": 0.3742453455924988,
      "learning_rate": 9.148385067763094e-06,
      "loss": 0.3754,
      "step": 37
    },
    {
      "epoch": 4.702439024390244,
      "grad_norm": 0.30102893710136414,
      "learning_rate": 9.075141687584056e-06,
      "loss": 0.3595,
      "step": 38
    },
    {
      "epoch": 4.819512195121951,
      "grad_norm": 0.3214716911315918,
      "learning_rate": 8.999196223698599e-06,
      "loss": 0.3792,
      "step": 39
    },
    {
      "epoch": 4.9365853658536585,
      "grad_norm": 0.29708611965179443,
      "learning_rate": 8.920599032883553e-06,
      "loss": 0.3585,
      "step": 40
    },
    {
      "epoch": 5.117073170731707,
      "grad_norm": 0.6357777714729309,
      "learning_rate": 8.839402230183e-06,
      "loss": 0.7275,
      "step": 41
    },
    {
      "epoch": 5.234146341463415,
      "grad_norm": 0.29909756779670715,
      "learning_rate": 8.755659654352599e-06,
      "loss": 0.3258,
      "step": 42
    },
    {
      "epoch": 5.351219512195122,
      "grad_norm": 0.3142697513103485,
      "learning_rate": 8.669426832160997e-06,
      "loss": 0.3419,
      "step": 43
    },
    {
      "epoch": 5.46829268292683,
      "grad_norm": 0.31918543577194214,
      "learning_rate": 8.580760941571968e-06,
      "loss": 0.3464,
      "step": 44
    },
    {
      "epoch": 5.585365853658536,
      "grad_norm": 0.29149070382118225,
      "learning_rate": 8.489720773831717e-06,
      "loss": 0.3103,
      "step": 45
    },
    {
      "epoch": 5.702439024390244,
      "grad_norm": 0.3091978132724762,
      "learning_rate": 8.396366694486466e-06,
      "loss": 0.323,
      "step": 46
    },
    {
      "epoch": 5.819512195121951,
      "grad_norm": 0.33374419808387756,
      "learning_rate": 8.30076060335616e-06,
      "loss": 0.3192,
      "step": 47
    },
    {
      "epoch": 5.9365853658536585,
      "grad_norm": 0.2958971858024597,
      "learning_rate": 8.202965893490877e-06,
      "loss": 0.3166,
      "step": 48
    },
    {
      "epoch": 6.117073170731707,
      "grad_norm": 0.2610955536365509,
      "learning_rate": 8.103047409137114e-06,
      "loss": 0.6359,
      "step": 49
    },
    {
      "epoch": 6.234146341463415,
      "grad_norm": 0.30614060163497925,
      "learning_rate": 8.001071402741843e-06,
      "loss": 0.2911,
      "step": 50
    },
    {
      "epoch": 6.351219512195122,
      "grad_norm": 0.28601980209350586,
      "learning_rate": 7.897105491022819e-06,
      "loss": 0.2911,
      "step": 51
    },
    {
      "epoch": 6.46829268292683,
      "grad_norm": 0.288555771112442,
      "learning_rate": 7.791218610134324e-06,
      "loss": 0.2929,
      "step": 52
    },
    {
      "epoch": 6.585365853658536,
      "grad_norm": 0.273262619972229,
      "learning_rate": 7.683480969958005e-06,
      "loss": 0.2856,
      "step": 53
    },
    {
      "epoch": 6.702439024390244,
      "grad_norm": 0.27246272563934326,
      "learning_rate": 7.5739640075491546e-06,
      "loss": 0.2911,
      "step": 54
    },
    {
      "epoch": 6.819512195121951,
      "grad_norm": 0.24324610829353333,
      "learning_rate": 7.462740339769323e-06,
      "loss": 0.2754,
      "step": 55
    },
    {
      "epoch": 6.9365853658536585,
      "grad_norm": 0.28536200523376465,
      "learning_rate": 7.349883715136601e-06,
      "loss": 0.284,
      "step": 56
    },
    {
      "epoch": 7.117073170731707,
      "grad_norm": 0.5948038697242737,
      "learning_rate": 7.235468964925571e-06,
      "loss": 0.5049,
      "step": 57
    },
    {
      "epoch": 7.234146341463415,
      "grad_norm": 0.3587504029273987,
      "learning_rate": 7.119571953549305e-06,
      "loss": 0.2436,
      "step": 58
    },
    {
      "epoch": 7.351219512195122,
      "grad_norm": 0.2773548364639282,
      "learning_rate": 7.002269528256334e-06,
      "loss": 0.2551,
      "step": 59
    },
    {
      "epoch": 7.46829268292683,
      "grad_norm": 0.3290187120437622,
      "learning_rate": 6.883639468175926e-06,
      "loss": 0.2624,
      "step": 60
    },
    {
      "epoch": 7.585365853658536,
      "grad_norm": 0.25596076250076294,
      "learning_rate": 6.763760432745475e-06,
      "loss": 0.2434,
      "step": 61
    },
    {
      "epoch": 7.702439024390244,
      "grad_norm": 0.3228106200695038,
      "learning_rate": 6.6427119095541745e-06,
      "loss": 0.2461,
      "step": 62
    },
    {
      "epoch": 7.819512195121951,
      "grad_norm": 0.2985459268093109,
      "learning_rate": 6.520574161637591e-06,
      "loss": 0.2454,
      "step": 63
    },
    {
      "epoch": 7.9365853658536585,
      "grad_norm": 0.2825384736061096,
      "learning_rate": 6.397428174258048e-06,
      "loss": 0.2453,
      "step": 64
    },
    {
      "epoch": 8.117073170731707,
      "grad_norm": 0.5776787400245667,
      "learning_rate": 6.273355601206143e-06,
      "loss": 0.4516,
      "step": 65
    },
    {
      "epoch": 8.234146341463415,
      "grad_norm": 0.2711504101753235,
      "learning_rate": 6.148438710658979e-06,
      "loss": 0.224,
      "step": 66
    },
    {
      "epoch": 8.351219512195122,
      "grad_norm": 0.29715049266815186,
      "learning_rate": 6.022760330631006e-06,
      "loss": 0.2096,
      "step": 67
    },
    {
      "epoch": 8.46829268292683,
      "grad_norm": 0.27093052864074707,
      "learning_rate": 5.896403794053679e-06,
      "loss": 0.2175,
      "step": 68
    },
    {
      "epoch": 8.585365853658537,
      "grad_norm": 0.29388418793678284,
      "learning_rate": 5.76945288352031e-06,
      "loss": 0.2162,
      "step": 69
    },
    {
      "epoch": 8.702439024390245,
      "grad_norm": 0.248213529586792,
      "learning_rate": 5.641991775732756e-06,
      "loss": 0.1999,
      "step": 70
    },
    {
      "epoch": 8.819512195121952,
      "grad_norm": 0.29978156089782715,
      "learning_rate": 5.514104985686802e-06,
      "loss": 0.2095,
      "step": 71
    },
    {
      "epoch": 8.93658536585366,
      "grad_norm": 0.2769979238510132,
      "learning_rate": 5.385877310633233e-06,
      "loss": 0.189,
      "step": 72
    },
    {
      "epoch": 9.117073170731707,
      "grad_norm": 0.6451863050460815,
      "learning_rate": 5.257393773851733e-06,
      "loss": 0.4253,
      "step": 73
    },
    {
      "epoch": 9.234146341463415,
      "grad_norm": 0.2627960443496704,
      "learning_rate": 5.1287395682749444e-06,
      "loss": 0.1792,
      "step": 74
    },
    {
      "epoch": 9.351219512195122,
      "grad_norm": 0.3619653284549713,
      "learning_rate": 5e-06,
      "loss": 0.1752,
      "step": 75
    },
    {
      "epoch": 9.46829268292683,
      "grad_norm": 0.3499554991722107,
      "learning_rate": 4.871260431725058e-06,
      "loss": 0.1834,
      "step": 76
    },
    {
      "epoch": 9.585365853658537,
      "grad_norm": 0.2961571216583252,
      "learning_rate": 4.742606226148268e-06,
      "loss": 0.1824,
      "step": 77
    },
    {
      "epoch": 9.702439024390245,
      "grad_norm": 0.29610928893089294,
      "learning_rate": 4.614122689366769e-06,
      "loss": 0.1682,
      "step": 78
    },
    {
      "epoch": 9.819512195121952,
      "grad_norm": 0.29082462191581726,
      "learning_rate": 4.485895014313198e-06,
      "loss": 0.1788,
      "step": 79
    },
    {
      "epoch": 9.93658536585366,
      "grad_norm": 0.3001067638397217,
      "learning_rate": 4.358008224267245e-06,
      "loss": 0.173,
      "step": 80
    },
    {
      "epoch": 10.117073170731707,
      "grad_norm": 0.5978091955184937,
      "learning_rate": 4.230547116479691e-06,
      "loss": 0.3196,
      "step": 81
    },
    {
      "epoch": 10.234146341463415,
      "grad_norm": 0.25669512152671814,
      "learning_rate": 4.103596205946323e-06,
      "loss": 0.159,
      "step": 82
    },
    {
      "epoch": 10.351219512195122,
      "grad_norm": 0.38418713212013245,
      "learning_rate": 3.977239669368998e-06,
      "loss": 0.1509,
      "step": 83
    },
    {
      "epoch": 10.46829268292683,
      "grad_norm": 0.3050716519355774,
      "learning_rate": 3.851561289341023e-06,
      "loss": 0.1467,
      "step": 84
    },
    {
      "epoch": 10.585365853658537,
      "grad_norm": 0.3000875413417816,
      "learning_rate": 3.726644398793857e-06,
      "loss": 0.1411,
      "step": 85
    },
    {
      "epoch": 10.702439024390245,
      "grad_norm": 0.29455339908599854,
      "learning_rate": 3.6025718257419532e-06,
      "loss": 0.1442,
      "step": 86
    },
    {
      "epoch": 10.819512195121952,
      "grad_norm": 0.29129162430763245,
      "learning_rate": 3.4794258383624115e-06,
      "loss": 0.1408,
      "step": 87
    },
    {
      "epoch": 10.93658536585366,
      "grad_norm": 0.28606802225112915,
      "learning_rate": 3.3572880904458267e-06,
      "loss": 0.1523,
      "step": 88
    },
    {
      "epoch": 11.117073170731707,
      "grad_norm": 0.6090349555015564,
      "learning_rate": 3.236239567254526e-06,
      "loss": 0.274,
      "step": 89
    },
    {
      "epoch": 11.234146341463415,
      "grad_norm": 0.38308948278427124,
      "learning_rate": 3.116360531824074e-06,
      "loss": 0.1233,
      "step": 90
    },
    {
      "epoch": 11.351219512195122,
      "grad_norm": 0.2933948338031769,
      "learning_rate": 2.997730471743667e-06,
      "loss": 0.1207,
      "step": 91
    },
    {
      "epoch": 11.46829268292683,
      "grad_norm": 0.37544092535972595,
      "learning_rate": 2.880428046450697e-06,
      "loss": 0.1231,
      "step": 92
    },
    {
      "epoch": 11.585365853658537,
      "grad_norm": 0.26281094551086426,
      "learning_rate": 2.7645310350744296e-06,
      "loss": 0.1253,
      "step": 93
    },
    {
      "epoch": 11.702439024390245,
      "grad_norm": 0.34619009494781494,
      "learning_rate": 2.6501162848634023e-06,
      "loss": 0.1226,
      "step": 94
    },
    {
      "epoch": 11.819512195121952,
      "grad_norm": 0.2849329113960266,
      "learning_rate": 2.537259660230679e-06,
      "loss": 0.1231,
      "step": 95
    },
    {
      "epoch": 11.93658536585366,
      "grad_norm": 0.2921876609325409,
      "learning_rate": 2.426035992450848e-06,
      "loss": 0.1233,
      "step": 96
    },
    {
      "epoch": 12.117073170731707,
      "grad_norm": 0.4465767741203308,
      "learning_rate": 2.316519030041998e-06,
      "loss": 0.2171,
      "step": 97
    },
    {
      "epoch": 12.234146341463415,
      "grad_norm": 0.4664257764816284,
      "learning_rate": 2.2087813898656775e-06,
      "loss": 0.1071,
      "step": 98
    },
    {
      "epoch": 12.351219512195122,
      "grad_norm": 0.27322566509246826,
      "learning_rate": 2.102894508977182e-06,
      "loss": 0.1039,
      "step": 99
    },
    {
      "epoch": 12.46829268292683,
      "grad_norm": 0.42056962847709656,
      "learning_rate": 1.9989285972581595e-06,
      "loss": 0.1033,
      "step": 100
    },
    {
      "epoch": 12.585365853658537,
      "grad_norm": 0.44652435183525085,
      "learning_rate": 1.896952590862886e-06,
      "loss": 0.1052,
      "step": 101
    },
    {
      "epoch": 12.702439024390245,
      "grad_norm": 0.2609389126300812,
      "learning_rate": 1.7970341065091246e-06,
      "loss": 0.1051,
      "step": 102
    },
    {
      "epoch": 12.819512195121952,
      "grad_norm": 0.3649502098560333,
      "learning_rate": 1.699239396643841e-06,
      "loss": 0.1059,
      "step": 103
    },
    {
      "epoch": 12.93658536585366,
      "grad_norm": 0.40414321422576904,
      "learning_rate": 1.6036333055135345e-06,
      "loss": 0.1015,
      "step": 104
    },
    {
      "epoch": 13.117073170731707,
      "grad_norm": 0.6658644080162048,
      "learning_rate": 1.5102792261682813e-06,
      "loss": 0.1962,
      "step": 105
    },
    {
      "epoch": 13.234146341463415,
      "grad_norm": 0.2903091013431549,
      "learning_rate": 1.4192390584280347e-06,
      "loss": 0.0981,
      "step": 106
    },
    {
      "epoch": 13.351219512195122,
      "grad_norm": 0.6606068015098572,
      "learning_rate": 1.330573167839005e-06,
      "loss": 0.1001,
      "step": 107
    },
    {
      "epoch": 13.46829268292683,
      "grad_norm": 0.3750366270542145,
      "learning_rate": 1.2443403456474017e-06,
      "loss": 0.0932,
      "step": 108
    },
    {
      "epoch": 13.585365853658537,
      "grad_norm": 0.21313251554965973,
      "learning_rate": 1.1605977698170001e-06,
      "loss": 0.0858,
      "step": 109
    },
    {
      "epoch": 13.702439024390245,
      "grad_norm": 0.33804988861083984,
      "learning_rate": 1.0794009671164484e-06,
      "loss": 0.0965,
      "step": 110
    },
    {
      "epoch": 13.819512195121952,
      "grad_norm": 0.40509283542633057,
      "learning_rate": 1.0008037763014033e-06,
      "loss": 0.0858,
      "step": 111
    },
    {
      "epoch": 13.93658536585366,
      "grad_norm": 0.29031136631965637,
      "learning_rate": 9.248583124159438e-07,
      "loss": 0.0887,
      "step": 112
    },
    {
      "epoch": 14.117073170731707,
      "grad_norm": 0.4998484253883362,
      "learning_rate": 8.516149322369055e-07,
      "loss": 0.1671,
      "step": 113
    },
    {
      "epoch": 14.234146341463415,
      "grad_norm": 0.20616880059242249,
      "learning_rate": 7.811222008840719e-07,
      "loss": 0.0857,
      "step": 114
    },
    {
      "epoch": 14.351219512195122,
      "grad_norm": 0.22407670319080353,
      "learning_rate": 7.13426859618338e-07,
      "loss": 0.0879,
      "step": 115
    },
    {
      "epoch": 14.46829268292683,
      "grad_norm": 0.2743969261646271,
      "learning_rate": 6.485737948492237e-07,
      "loss": 0.0853,
      "step": 116
    },
    {
      "epoch": 14.585365853658537,
      "grad_norm": 0.2952362596988678,
      "learning_rate": 5.866060083722624e-07,
      "loss": 0.0756,
      "step": 117
    },
    {
      "epoch": 14.702439024390245,
      "grad_norm": 0.2747920751571655,
      "learning_rate": 5.275645888560233e-07,
      "loss": 0.0821,
      "step": 118
    },
    {
      "epoch": 14.819512195121952,
      "grad_norm": 0.21030597388744354,
      "learning_rate": 4.71488684597643e-07,
      "loss": 0.0822,
      "step": 119
    },
    {
      "epoch": 14.93658536585366,
      "grad_norm": 0.19308297336101532,
      "learning_rate": 4.184154775649768e-07,
      "loss": 0.0831,
      "step": 120
    },
    {
      "epoch": 15.117073170731707,
      "grad_norm": 0.4680771231651306,
      "learning_rate": 3.683801587425251e-07,
      "loss": 0.1536,
      "step": 121
    },
    {
      "epoch": 15.234146341463415,
      "grad_norm": 0.23554736375808716,
      "learning_rate": 3.214159047975324e-07,
      "loss": 0.0759,
      "step": 122
    },
    {
      "epoch": 15.351219512195122,
      "grad_norm": 0.2371598780155182,
      "learning_rate": 2.7755385608169374e-07,
      "loss": 0.0799,
      "step": 123
    },
    {
      "epoch": 15.46829268292683,
      "grad_norm": 0.23220933973789215,
      "learning_rate": 2.368230959830875e-07,
      "loss": 0.0906,
      "step": 124
    },
    {
      "epoch": 15.585365853658537,
      "grad_norm": 0.20104503631591797,
      "learning_rate": 1.992506316419912e-07,
      "loss": 0.0743,
      "step": 125
    },
    {
      "epoch": 15.702439024390245,
      "grad_norm": 0.2180931121110916,
      "learning_rate": 1.6486137604339813e-07,
      "loss": 0.0817,
      "step": 126
    },
    {
      "epoch": 15.819512195121952,
      "grad_norm": 0.19550880789756775,
      "learning_rate": 1.3367813149808728e-07,
      "loss": 0.0761,
      "step": 127
    },
    {
      "epoch": 15.93658536585366,
      "grad_norm": 0.19533437490463257,
      "learning_rate": 1.0572157452321097e-07,
      "loss": 0.0782,
      "step": 128
    },
    {
      "epoch": 16.117073170731707,
      "grad_norm": 0.45543140172958374,
      "learning_rate": 8.101024213241826e-08,
      "loss": 0.1505,
      "step": 129
    },
    {
      "epoch": 16.234146341463415,
      "grad_norm": 0.20492489635944366,
      "learning_rate": 5.9560519544614725e-08,
      "loss": 0.0716,
      "step": 130
    },
    {
      "epoch": 16.351219512195122,
      "grad_norm": 0.16599130630493164,
      "learning_rate": 4.138662931949255e-08,
      "loss": 0.0768,
      "step": 131
    },
    {
      "epoch": 16.46829268292683,
      "grad_norm": 0.17992687225341797,
      "learning_rate": 2.6500621927054716e-08,
      "loss": 0.0777,
      "step": 132
    },
    {
      "epoch": 16.585365853658537,
      "grad_norm": 0.16531269252300262,
      "learning_rate": 1.4912367757366485e-08,
      "loss": 0.0802,
      "step": 133
    },
    {
      "epoch": 16.702439024390245,
      "grad_norm": 0.18542739748954773,
      "learning_rate": 6.629550575847355e-09,
      "loss": 0.0754,
      "step": 134
    },
    {
      "epoch": 16.819512195121952,
      "grad_norm": 0.16598491370677948,
      "learning_rate": 1.657662428434792e-09,
      "loss": 0.0773,
      "step": 135
    },
    {
      "epoch": 16.93658536585366,
      "grad_norm": 0.18370650708675385,
      "learning_rate": 0.0,
      "loss": 0.0795,
      "step": 136
    },
    {
      "epoch": 16.93658536585366,
      "step": 136,
      "total_flos": 487121640751104.0,
      "train_loss": 0.29537003558567343,
      "train_runtime": 25552.5852,
      "train_samples_per_second": 0.546,
      "train_steps_per_second": 0.005
    }
  ],
  "logging_steps": 1,
  "max_steps": 136,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 17,
  "save_steps": 10000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 487121640751104.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}