{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9725490196078432,
  "eval_steps": 500,
  "global_step": 126,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.023529411764705882,
      "grad_norm": 6.901826858520508,
      "learning_rate": 7.692307692307694e-07,
      "loss": 1.1119,
      "step": 1
    },
    {
      "epoch": 0.047058823529411764,
      "grad_norm": 7.422886371612549,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 1.1537,
      "step": 2
    },
    {
      "epoch": 0.07058823529411765,
      "grad_norm": 6.920129299163818,
      "learning_rate": 2.307692307692308e-06,
      "loss": 1.1205,
      "step": 3
    },
    {
      "epoch": 0.09411764705882353,
      "grad_norm": 6.285982608795166,
      "learning_rate": 3.0769230769230774e-06,
      "loss": 1.0476,
      "step": 4
    },
    {
      "epoch": 0.11764705882352941,
      "grad_norm": 5.431225299835205,
      "learning_rate": 3.846153846153847e-06,
      "loss": 1.0753,
      "step": 5
    },
    {
      "epoch": 0.1411764705882353,
      "grad_norm": 3.6898839473724365,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.9761,
      "step": 6
    },
    {
      "epoch": 0.16470588235294117,
      "grad_norm": 2.8562192916870117,
      "learning_rate": 5.384615384615385e-06,
      "loss": 0.9581,
      "step": 7
    },
    {
      "epoch": 0.18823529411764706,
      "grad_norm": 4.019009590148926,
      "learning_rate": 6.153846153846155e-06,
      "loss": 1.0193,
      "step": 8
    },
    {
      "epoch": 0.21176470588235294,
      "grad_norm": 4.8232526779174805,
      "learning_rate": 6.923076923076923e-06,
      "loss": 0.951,
      "step": 9
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 4.59989595413208,
      "learning_rate": 7.692307692307694e-06,
      "loss": 1.0139,
      "step": 10
    },
    {
      "epoch": 0.25882352941176473,
      "grad_norm": 3.852893829345703,
      "learning_rate": 8.461538461538462e-06,
      "loss": 0.9087,
      "step": 11
    },
    {
      "epoch": 0.2823529411764706,
      "grad_norm": 3.7579197883605957,
      "learning_rate": 9.230769230769232e-06,
      "loss": 1.0375,
      "step": 12
    },
    {
      "epoch": 0.3058823529411765,
      "grad_norm": 2.5401360988616943,
      "learning_rate": 1e-05,
      "loss": 0.9651,
      "step": 13
    },
    {
      "epoch": 0.32941176470588235,
      "grad_norm": 2.095151424407959,
      "learning_rate": 9.998067787472772e-06,
      "loss": 0.9098,
      "step": 14
    },
    {
      "epoch": 0.35294117647058826,
      "grad_norm": 2.1526248455047607,
      "learning_rate": 9.992272643269181e-06,
      "loss": 0.8308,
      "step": 15
    },
    {
      "epoch": 0.3764705882352941,
      "grad_norm": 1.9617197513580322,
      "learning_rate": 9.982619046369321e-06,
      "loss": 0.9148,
      "step": 16
    },
    {
      "epoch": 0.4,
      "grad_norm": 1.5976275205612183,
      "learning_rate": 9.96911445789354e-06,
      "loss": 0.8948,
      "step": 17
    },
    {
      "epoch": 0.4235294117647059,
      "grad_norm": 1.484428882598877,
      "learning_rate": 9.951769315335843e-06,
      "loss": 0.8592,
      "step": 18
    },
    {
      "epoch": 0.4470588235294118,
      "grad_norm": 1.4591351747512817,
      "learning_rate": 9.930597024496933e-06,
      "loss": 0.8315,
      "step": 19
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 1.1599817276000977,
      "learning_rate": 9.905613949123036e-06,
      "loss": 0.808,
      "step": 20
    },
    {
      "epoch": 0.49411764705882355,
      "grad_norm": 1.2381192445755005,
      "learning_rate": 9.87683939825864e-06,
      "loss": 0.8833,
      "step": 21
    },
    {
      "epoch": 0.5176470588235295,
      "grad_norm": 1.25748872756958,
      "learning_rate": 9.844295611322804e-06,
      "loss": 0.873,
      "step": 22
    },
    {
      "epoch": 0.5411764705882353,
      "grad_norm": 1.0262051820755005,
      "learning_rate": 9.808007740920647e-06,
      "loss": 0.7802,
      "step": 23
    },
    {
      "epoch": 0.5647058823529412,
      "grad_norm": 0.9352391362190247,
      "learning_rate": 9.768003833403278e-06,
      "loss": 0.8134,
      "step": 24
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 1.0994199514389038,
      "learning_rate": 9.724314807191197e-06,
      "loss": 0.8358,
      "step": 25
    },
    {
      "epoch": 0.611764705882353,
      "grad_norm": 0.9108858704566956,
      "learning_rate": 9.6769744288779e-06,
      "loss": 0.8229,
      "step": 26
    },
    {
      "epoch": 0.6352941176470588,
      "grad_norm": 0.7783969044685364,
      "learning_rate": 9.626019287132202e-06,
      "loss": 0.7928,
      "step": 27
    },
    {
      "epoch": 0.6588235294117647,
      "grad_norm": 0.8140386343002319,
      "learning_rate": 9.571488764419381e-06,
      "loss": 0.8129,
      "step": 28
    },
    {
      "epoch": 0.6823529411764706,
      "grad_norm": 0.7752570509910583,
      "learning_rate": 9.51342500656308e-06,
      "loss": 0.8572,
      "step": 29
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 0.678372323513031,
      "learning_rate": 9.451872890171419e-06,
      "loss": 0.8103,
      "step": 30
    },
    {
      "epoch": 0.7294117647058823,
      "grad_norm": 0.6800512075424194,
      "learning_rate": 9.386879987952549e-06,
      "loss": 0.897,
      "step": 31
    },
    {
      "epoch": 0.7529411764705882,
      "grad_norm": 0.6305904388427734,
      "learning_rate": 9.318496531946411e-06,
      "loss": 0.818,
      "step": 32
    },
    {
      "epoch": 0.7764705882352941,
      "grad_norm": 0.7293695211410522,
      "learning_rate": 9.246775374701139e-06,
      "loss": 0.8332,
      "step": 33
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.7706238031387329,
      "learning_rate": 9.171771948424138e-06,
      "loss": 0.8584,
      "step": 34
    },
    {
      "epoch": 0.8235294117647058,
      "grad_norm": 0.6435885429382324,
      "learning_rate": 9.093544222139338e-06,
      "loss": 0.8726,
      "step": 35
    },
    {
      "epoch": 0.8470588235294118,
      "grad_norm": 0.7461095452308655,
      "learning_rate": 9.012152656883824e-06,
      "loss": 0.7851,
      "step": 36
    },
    {
      "epoch": 0.8705882352941177,
      "grad_norm": 0.8690148591995239,
      "learning_rate": 8.927660158978392e-06,
      "loss": 0.8347,
      "step": 37
    },
    {
      "epoch": 0.8941176470588236,
      "grad_norm": 0.6226567625999451,
      "learning_rate": 8.84013203140821e-06,
      "loss": 0.8419,
      "step": 38
    },
    {
      "epoch": 0.9176470588235294,
      "grad_norm": 0.603524923324585,
      "learning_rate": 8.749635923351108e-06,
      "loss": 0.776,
      "step": 39
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 0.6303524374961853,
      "learning_rate": 8.656241777892544e-06,
      "loss": 0.7207,
      "step": 40
    },
    {
      "epoch": 0.9647058823529412,
      "grad_norm": 0.5369915962219238,
      "learning_rate": 8.56002177796765e-06,
      "loss": 0.7694,
      "step": 41
    },
    {
      "epoch": 0.9882352941176471,
      "grad_norm": 0.5721177458763123,
      "learning_rate": 8.461050290572114e-06,
      "loss": 0.7732,
      "step": 42
    },
    {
      "epoch": 1.0156862745098039,
      "grad_norm": 1.0953840017318726,
      "learning_rate": 8.359403809285054e-06,
      "loss": 1.3119,
      "step": 43
    },
    {
      "epoch": 1.0392156862745099,
      "grad_norm": 0.5467516183853149,
      "learning_rate": 8.255160895148263e-06,
      "loss": 0.7665,
      "step": 44
    },
    {
      "epoch": 1.0627450980392157,
      "grad_norm": 0.6043545007705688,
      "learning_rate": 8.14840211594757e-06,
      "loss": 0.7368,
      "step": 45
    },
    {
      "epoch": 1.0862745098039215,
      "grad_norm": 0.6263077259063721,
      "learning_rate": 8.039209983943201e-06,
      "loss": 0.7976,
      "step": 46
    },
    {
      "epoch": 1.1098039215686275,
      "grad_norm": 0.5699981451034546,
      "learning_rate": 7.927668892097288e-06,
      "loss": 0.7109,
      "step": 47
    },
    {
      "epoch": 1.1333333333333333,
      "grad_norm": 0.5554935336112976,
      "learning_rate": 7.81386504884782e-06,
      "loss": 0.738,
      "step": 48
    },
    {
      "epoch": 1.156862745098039,
      "grad_norm": 0.7168362736701965,
      "learning_rate": 7.697886411479422e-06,
      "loss": 0.8269,
      "step": 49
    },
    {
      "epoch": 1.1803921568627451,
      "grad_norm": 0.5106287002563477,
      "learning_rate": 7.579822618142505e-06,
      "loss": 0.7993,
      "step": 50
    },
    {
      "epoch": 1.203921568627451,
      "grad_norm": 0.7224318385124207,
      "learning_rate": 7.459764918573264e-06,
      "loss": 0.8325,
      "step": 51
    },
    {
      "epoch": 1.227450980392157,
      "grad_norm": 0.6035781502723694,
      "learning_rate": 7.3378061035681415e-06,
      "loss": 0.73,
      "step": 52
    },
    {
      "epoch": 1.2509803921568627,
      "grad_norm": 0.5344979166984558,
      "learning_rate": 7.2140404332671986e-06,
      "loss": 0.7398,
      "step": 53
    },
    {
      "epoch": 1.2745098039215685,
      "grad_norm": 0.6349881291389465,
      "learning_rate": 7.088563564301874e-06,
      "loss": 0.8289,
      "step": 54
    },
    {
      "epoch": 1.2980392156862746,
      "grad_norm": 0.5782693028450012,
      "learning_rate": 6.961472475863406e-06,
      "loss": 0.7454,
      "step": 55
    },
    {
      "epoch": 1.3215686274509804,
      "grad_norm": 0.4461568295955658,
      "learning_rate": 6.832865394749065e-06,
      "loss": 0.6694,
      "step": 56
    },
    {
      "epoch": 1.3450980392156864,
      "grad_norm": 0.5781851410865784,
      "learning_rate": 6.702841719444141e-06,
      "loss": 0.8093,
      "step": 57
    },
    {
      "epoch": 1.3686274509803922,
      "grad_norm": 0.48329582810401917,
      "learning_rate": 6.571501943298335e-06,
      "loss": 0.7096,
      "step": 58
    },
    {
      "epoch": 1.392156862745098,
      "grad_norm": 0.6234760284423828,
      "learning_rate": 6.4389475768559675e-06,
      "loss": 0.8138,
      "step": 59
    },
    {
      "epoch": 1.415686274509804,
      "grad_norm": 0.49070653319358826,
      "learning_rate": 6.305281069399989e-06,
      "loss": 0.6619,
      "step": 60
    },
    {
      "epoch": 1.4392156862745098,
      "grad_norm": 0.49748146533966064,
      "learning_rate": 6.17060572977047e-06,
      "loss": 0.693,
      "step": 61
    },
    {
      "epoch": 1.4627450980392158,
      "grad_norm": 0.5554483532905579,
      "learning_rate": 6.035025646518747e-06,
      "loss": 0.7561,
      "step": 62
    },
    {
      "epoch": 1.4862745098039216,
      "grad_norm": 0.6127786040306091,
      "learning_rate": 5.898645607458941e-06,
      "loss": 0.7799,
      "step": 63
    },
    {
      "epoch": 1.5098039215686274,
      "grad_norm": 0.5526847839355469,
      "learning_rate": 5.761571018679025e-06,
      "loss": 0.7374,
      "step": 64
    },
    {
      "epoch": 1.5333333333333332,
      "grad_norm": 0.5685780644416809,
      "learning_rate": 5.623907823074044e-06,
      "loss": 0.8134,
      "step": 65
    },
    {
      "epoch": 1.5568627450980392,
      "grad_norm": 0.4792926013469696,
      "learning_rate": 5.48576241846443e-06,
      "loss": 0.7933,
      "step": 66
    },
    {
      "epoch": 1.5803921568627453,
      "grad_norm": 0.4758462607860565,
      "learning_rate": 5.347241575362729e-06,
      "loss": 0.7209,
      "step": 67
    },
    {
      "epoch": 1.603921568627451,
      "grad_norm": 0.5107057690620422,
      "learning_rate": 5.208452354452275e-06,
      "loss": 0.7746,
      "step": 68
    },
    {
      "epoch": 1.6274509803921569,
      "grad_norm": 0.4799031913280487,
      "learning_rate": 5.069502023841576e-06,
      "loss": 0.7635,
      "step": 69
    },
    {
      "epoch": 1.6509803921568627,
      "grad_norm": 0.5203085541725159,
      "learning_rate": 4.9304979761584256e-06,
      "loss": 0.7708,
      "step": 70
    },
    {
      "epoch": 1.6745098039215687,
      "grad_norm": 0.44460946321487427,
      "learning_rate": 4.791547645547727e-06,
      "loss": 0.6827,
      "step": 71
    },
    {
      "epoch": 1.6980392156862745,
      "grad_norm": 0.5535275340080261,
      "learning_rate": 4.652758424637271e-06,
      "loss": 0.794,
      "step": 72
    },
    {
      "epoch": 1.7215686274509805,
      "grad_norm": 0.4878956377506256,
      "learning_rate": 4.514237581535571e-06,
      "loss": 0.7368,
      "step": 73
    },
    {
      "epoch": 1.7450980392156863,
      "grad_norm": 0.5016121864318848,
      "learning_rate": 4.3760921769259585e-06,
      "loss": 0.6936,
      "step": 74
    },
    {
      "epoch": 1.768627450980392,
      "grad_norm": 0.5011301040649414,
      "learning_rate": 4.2384289813209754e-06,
      "loss": 0.7475,
      "step": 75
    },
    {
      "epoch": 1.792156862745098,
      "grad_norm": 0.4553963243961334,
      "learning_rate": 4.101354392541061e-06,
      "loss": 0.7358,
      "step": 76
    },
    {
      "epoch": 1.815686274509804,
      "grad_norm": 0.4620165228843689,
      "learning_rate": 3.964974353481254e-06,
      "loss": 0.7331,
      "step": 77
    },
    {
      "epoch": 1.83921568627451,
      "grad_norm": 0.4453507363796234,
      "learning_rate": 3.829394270229531e-06,
      "loss": 0.7295,
      "step": 78
    },
    {
      "epoch": 1.8627450980392157,
      "grad_norm": 0.402537077665329,
      "learning_rate": 3.694718930600012e-06,
      "loss": 0.642,
      "step": 79
    },
    {
      "epoch": 1.8862745098039215,
      "grad_norm": 0.4362320601940155,
      "learning_rate": 3.5610524231440324e-06,
      "loss": 0.7889,
      "step": 80
    },
    {
      "epoch": 1.9098039215686273,
      "grad_norm": 0.43875452876091003,
      "learning_rate": 3.428498056701665e-06,
      "loss": 0.7499,
      "step": 81
    },
    {
      "epoch": 1.9333333333333333,
      "grad_norm": 0.43475160002708435,
      "learning_rate": 3.2971582805558622e-06,
      "loss": 0.7663,
      "step": 82
    },
    {
      "epoch": 1.9568627450980394,
      "grad_norm": 0.46115896105766296,
      "learning_rate": 3.167134605250938e-06,
      "loss": 0.7652,
      "step": 83
    },
    {
      "epoch": 1.9803921568627452,
      "grad_norm": 0.4670518934726715,
      "learning_rate": 3.0385275241365965e-06,
      "loss": 0.7709,
      "step": 84
    },
    {
      "epoch": 2.007843137254902,
      "grad_norm": 0.8610158562660217,
      "learning_rate": 2.9114364356981274e-06,
      "loss": 1.2373,
      "step": 85
    },
    {
      "epoch": 2.0313725490196077,
      "grad_norm": 0.3888493478298187,
      "learning_rate": 2.7859595667328027e-06,
      "loss": 0.7255,
      "step": 86
    },
    {
      "epoch": 2.0549019607843135,
      "grad_norm": 0.42477184534072876,
      "learning_rate": 2.6621938964318593e-06,
      "loss": 0.6407,
      "step": 87
    },
    {
      "epoch": 2.0784313725490198,
      "grad_norm": 0.4506017863750458,
      "learning_rate": 2.5402350814267364e-06,
      "loss": 0.6873,
      "step": 88
    },
    {
      "epoch": 2.1019607843137256,
      "grad_norm": 0.48074784874916077,
      "learning_rate": 2.4201773818574956e-06,
      "loss": 0.6542,
      "step": 89
    },
    {
      "epoch": 2.1254901960784314,
      "grad_norm": 0.45760810375213623,
      "learning_rate": 2.302113588520578e-06,
      "loss": 0.6809,
      "step": 90
    },
    {
      "epoch": 2.149019607843137,
      "grad_norm": 0.3958469033241272,
      "learning_rate": 2.1861349511521817e-06,
      "loss": 0.6087,
      "step": 91
    },
    {
      "epoch": 2.172549019607843,
      "grad_norm": 0.479245662689209,
      "learning_rate": 2.072331107902713e-06,
      "loss": 0.9135,
      "step": 92
    },
    {
      "epoch": 2.196078431372549,
      "grad_norm": 0.4283719062805176,
      "learning_rate": 1.960790016056801e-06,
      "loss": 0.6736,
      "step": 93
    },
    {
      "epoch": 2.219607843137255,
      "grad_norm": 0.4299345314502716,
      "learning_rate": 1.8515978840524302e-06,
      "loss": 0.6972,
      "step": 94
    },
    {
      "epoch": 2.243137254901961,
      "grad_norm": 0.45547375082969666,
      "learning_rate": 1.7448391048517378e-06,
      "loss": 0.7224,
      "step": 95
    },
    {
      "epoch": 2.2666666666666666,
      "grad_norm": 0.40750595927238464,
      "learning_rate": 1.640596190714947e-06,
      "loss": 0.7225,
      "step": 96
    },
    {
      "epoch": 2.2901960784313724,
      "grad_norm": 0.43789613246917725,
      "learning_rate": 1.5389497094278861e-06,
      "loss": 0.7208,
      "step": 97
    },
    {
      "epoch": 2.313725490196078,
      "grad_norm": 0.4415332078933716,
      "learning_rate": 1.4399782220323515e-06,
      "loss": 0.6706,
      "step": 98
    },
    {
      "epoch": 2.3372549019607844,
      "grad_norm": 0.49981608986854553,
      "learning_rate": 1.3437582221074574e-06,
      "loss": 0.7925,
      "step": 99
    },
    {
      "epoch": 2.3607843137254902,
      "grad_norm": 0.48716047406196594,
      "learning_rate": 1.250364076648894e-06,
      "loss": 0.7385,
      "step": 100
    },
    {
      "epoch": 2.384313725490196,
      "grad_norm": 0.3869420886039734,
      "learning_rate": 1.1598679685917901e-06,
      "loss": 0.6665,
      "step": 101
    },
    {
      "epoch": 2.407843137254902,
      "grad_norm": 0.4081011116504669,
      "learning_rate": 1.0723398410216085e-06,
      "loss": 0.8291,
      "step": 102
    },
    {
      "epoch": 2.431372549019608,
      "grad_norm": 0.4056829512119293,
      "learning_rate": 9.878473431161767e-07,
      "loss": 0.6668,
      "step": 103
    },
    {
      "epoch": 2.454901960784314,
      "grad_norm": 0.36377865076065063,
      "learning_rate": 9.064557778606631e-07,
      "loss": 0.6017,
      "step": 104
    },
    {
      "epoch": 2.4784313725490197,
      "grad_norm": 0.43392133712768555,
      "learning_rate": 8.282280515758639e-07,
      "loss": 0.7824,
      "step": 105
    },
    {
      "epoch": 2.5019607843137255,
      "grad_norm": 0.47024935483932495,
      "learning_rate": 7.532246252988617e-07,
      "loss": 0.7446,
      "step": 106
    },
    {
      "epoch": 2.5254901960784313,
      "grad_norm": 0.425630658864975,
      "learning_rate": 6.815034680535915e-07,
      "loss": 0.713,
      "step": 107
    },
    {
      "epoch": 2.549019607843137,
      "grad_norm": 0.40889060497283936,
      "learning_rate": 6.131200120474512e-07,
      "loss": 0.7409,
      "step": 108
    },
    {
      "epoch": 2.572549019607843,
      "grad_norm": 0.40218353271484375,
      "learning_rate": 5.481271098285818e-07,
      "loss": 0.7501,
      "step": 109
    },
    {
      "epoch": 2.596078431372549,
      "grad_norm": 0.35820406675338745,
      "learning_rate": 4.865749934369224e-07,
      "loss": 0.6082,
      "step": 110
    },
    {
      "epoch": 2.619607843137255,
      "grad_norm": 0.42070600390434265,
      "learning_rate": 4.2851123558061927e-07,
      "loss": 0.7516,
      "step": 111
    },
    {
      "epoch": 2.6431372549019607,
      "grad_norm": 0.36287838220596313,
      "learning_rate": 3.739807128677986e-07,
      "loss": 0.6589,
      "step": 112
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.3751106560230255,
      "learning_rate": 3.230255711220992e-07,
      "loss": 0.7008,
      "step": 113
    },
    {
      "epoch": 2.6901960784313728,
      "grad_norm": 0.39373522996902466,
      "learning_rate": 2.756851928088056e-07,
      "loss": 0.7579,
      "step": 114
    },
    {
      "epoch": 2.7137254901960786,
      "grad_norm": 0.3703792989253998,
      "learning_rate": 2.3199616659672352e-07,
      "loss": 0.8005,
      "step": 115
    },
    {
      "epoch": 2.7372549019607844,
      "grad_norm": 0.3416251242160797,
      "learning_rate": 1.9199225907935492e-07,
      "loss": 0.6913,
      "step": 116
    },
    {
      "epoch": 2.76078431372549,
      "grad_norm": 0.3666976988315582,
      "learning_rate": 1.5570438867719695e-07,
      "loss": 0.6749,
      "step": 117
    },
    {
      "epoch": 2.784313725490196,
      "grad_norm": 0.41929343342781067,
      "learning_rate": 1.2316060174136e-07,
      "loss": 0.93,
      "step": 118
    },
    {
      "epoch": 2.8078431372549018,
      "grad_norm": 0.3298085033893585,
      "learning_rate": 9.43860508769645e-08,
      "loss": 0.5853,
      "step": 119
    },
    {
      "epoch": 2.831372549019608,
      "grad_norm": 0.4238205552101135,
      "learning_rate": 6.940297550306895e-08,
      "loss": 0.7548,
      "step": 120
    },
    {
      "epoch": 2.854901960784314,
      "grad_norm": 0.37445592880249023,
      "learning_rate": 4.823068466415615e-08,
      "loss": 0.7453,
      "step": 121
    },
    {
      "epoch": 2.8784313725490196,
      "grad_norm": 0.42801082134246826,
      "learning_rate": 3.088554210646133e-08,
      "loss": 0.8001,
      "step": 122
    },
    {
      "epoch": 2.9019607843137254,
      "grad_norm": 0.3497636616230011,
      "learning_rate": 1.7380953630678488e-08,
      "loss": 0.7289,
      "step": 123
    },
    {
      "epoch": 2.9254901960784316,
      "grad_norm": 0.4116727411746979,
      "learning_rate": 7.727356730820035e-09,
      "loss": 0.6974,
      "step": 124
    },
    {
      "epoch": 2.9490196078431374,
      "grad_norm": 0.3742615282535553,
      "learning_rate": 1.9322125272297488e-09,
      "loss": 0.765,
      "step": 125
    },
    {
      "epoch": 2.9725490196078432,
      "grad_norm": 0.36835694313049316,
      "learning_rate": 0.0,
      "loss": 0.6713,
      "step": 126
    },
    {
      "epoch": 2.9725490196078432,
      "step": 126,
      "total_flos": 121055548211200.0,
      "train_loss": 0.7961960165273576,
      "train_runtime": 4088.556,
      "train_samples_per_second": 2.986,
      "train_steps_per_second": 0.031
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 126,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 121055548211200.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}