| { |
| "best_global_step": 493, |
| "best_metric": 0.11202231794595718, |
| "best_model_checkpoint": "saves_stability/ia3/llama-3-8b-instruct/train_cb_1757340171/checkpoint-493", |
| "epoch": 10.0, |
| "eval_steps": 29, |
| "global_step": 570, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.08771929824561403, |
| "grad_norm": 3.4133479595184326, |
| "learning_rate": 3.5087719298245615e-06, |
| "loss": 1.1315, |
| "num_input_tokens_seen": 3200, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.17543859649122806, |
| "grad_norm": 3.076221227645874, |
| "learning_rate": 7.894736842105263e-06, |
| "loss": 1.1342, |
| "num_input_tokens_seen": 6400, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.2631578947368421, |
| "grad_norm": 2.6336629390716553, |
| "learning_rate": 1.2280701754385964e-05, |
| "loss": 1.1087, |
| "num_input_tokens_seen": 9312, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.3508771929824561, |
| "grad_norm": 3.1476151943206787, |
| "learning_rate": 1.6666666666666667e-05, |
| "loss": 1.1575, |
| "num_input_tokens_seen": 12384, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.43859649122807015, |
| "grad_norm": 3.560323476791382, |
| "learning_rate": 2.105263157894737e-05, |
| "loss": 1.2268, |
| "num_input_tokens_seen": 15360, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.5087719298245614, |
| "eval_loss": 1.1511393785476685, |
| "eval_runtime": 0.698, |
| "eval_samples_per_second": 35.815, |
| "eval_steps_per_second": 10.028, |
| "num_input_tokens_seen": 18048, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.5263157894736842, |
| "grad_norm": 3.018550157546997, |
| "learning_rate": 2.5438596491228074e-05, |
| "loss": 1.1543, |
| "num_input_tokens_seen": 18496, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.6140350877192983, |
| "grad_norm": 3.770270824432373, |
| "learning_rate": 2.9824561403508772e-05, |
| "loss": 1.2276, |
| "num_input_tokens_seen": 23008, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.7017543859649122, |
| "grad_norm": 3.1533043384552, |
| "learning_rate": 3.421052631578947e-05, |
| "loss": 1.2977, |
| "num_input_tokens_seen": 26336, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.7894736842105263, |
| "grad_norm": 2.404344320297241, |
| "learning_rate": 3.859649122807018e-05, |
| "loss": 1.2464, |
| "num_input_tokens_seen": 29568, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.8771929824561403, |
| "grad_norm": 2.784658193588257, |
| "learning_rate": 4.298245614035088e-05, |
| "loss": 1.2657, |
| "num_input_tokens_seen": 32864, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.9649122807017544, |
| "grad_norm": 3.5261030197143555, |
| "learning_rate": 4.736842105263158e-05, |
| "loss": 1.1823, |
| "num_input_tokens_seen": 35712, |
| "step": 55 |
| }, |
| { |
| "epoch": 1.0175438596491229, |
| "eval_loss": 1.1511393785476685, |
| "eval_runtime": 0.6672, |
| "eval_samples_per_second": 37.471, |
| "eval_steps_per_second": 10.492, |
| "num_input_tokens_seen": 36928, |
| "step": 58 |
| }, |
| { |
| "epoch": 1.0526315789473684, |
| "grad_norm": 2.227071523666382, |
| "learning_rate": 4.999812487773597e-05, |
| "loss": 1.0802, |
| "num_input_tokens_seen": 37824, |
| "step": 60 |
| }, |
| { |
| "epoch": 1.1403508771929824, |
| "grad_norm": 2.616727113723755, |
| "learning_rate": 4.997703298253406e-05, |
| "loss": 1.2189, |
| "num_input_tokens_seen": 40576, |
| "step": 65 |
| }, |
| { |
| "epoch": 1.2280701754385965, |
| "grad_norm": 3.264575242996216, |
| "learning_rate": 4.993252512887069e-05, |
| "loss": 1.2454, |
| "num_input_tokens_seen": 43360, |
| "step": 70 |
| }, |
| { |
| "epoch": 1.3157894736842106, |
| "grad_norm": 3.2736499309539795, |
| "learning_rate": 4.986464304284091e-05, |
| "loss": 1.1632, |
| "num_input_tokens_seen": 45824, |
| "step": 75 |
| }, |
| { |
| "epoch": 1.4035087719298245, |
| "grad_norm": 2.3630354404449463, |
| "learning_rate": 4.977345036387331e-05, |
| "loss": 0.9835, |
| "num_input_tokens_seen": 49312, |
| "step": 80 |
| }, |
| { |
| "epoch": 1.4912280701754386, |
| "grad_norm": 1.8746641874313354, |
| "learning_rate": 4.965903258506806e-05, |
| "loss": 0.7109, |
| "num_input_tokens_seen": 52704, |
| "step": 85 |
| }, |
| { |
| "epoch": 1.526315789473684, |
| "eval_loss": 0.4986986517906189, |
| "eval_runtime": 0.6669, |
| "eval_samples_per_second": 37.486, |
| "eval_steps_per_second": 10.496, |
| "num_input_tokens_seen": 54176, |
| "step": 87 |
| }, |
| { |
| "epoch": 1.5789473684210527, |
| "grad_norm": 2.7547216415405273, |
| "learning_rate": 4.952149697304716e-05, |
| "loss": 0.558, |
| "num_input_tokens_seen": 56352, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.6666666666666665, |
| "grad_norm": 1.8518539667129517, |
| "learning_rate": 4.9360972467392056e-05, |
| "loss": 0.5075, |
| "num_input_tokens_seen": 58944, |
| "step": 95 |
| }, |
| { |
| "epoch": 1.7543859649122808, |
| "grad_norm": 2.5723066329956055, |
| "learning_rate": 4.917760955976277e-05, |
| "loss": 0.3382, |
| "num_input_tokens_seen": 62176, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.8421052631578947, |
| "grad_norm": 0.9754212498664856, |
| "learning_rate": 4.897158015281209e-05, |
| "loss": 0.1172, |
| "num_input_tokens_seen": 65504, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.9298245614035088, |
| "grad_norm": 1.365744948387146, |
| "learning_rate": 4.874307739902689e-05, |
| "loss": 0.2348, |
| "num_input_tokens_seen": 69312, |
| "step": 110 |
| }, |
| { |
| "epoch": 2.017543859649123, |
| "grad_norm": 0.6378192901611328, |
| "learning_rate": 4.849231551964771e-05, |
| "loss": 0.3782, |
| "num_input_tokens_seen": 72464, |
| "step": 115 |
| }, |
| { |
| "epoch": 2.0350877192982457, |
| "eval_loss": 0.14016123116016388, |
| "eval_runtime": 0.6682, |
| "eval_samples_per_second": 37.416, |
| "eval_steps_per_second": 10.476, |
| "num_input_tokens_seen": 73136, |
| "step": 116 |
| }, |
| { |
| "epoch": 2.1052631578947367, |
| "grad_norm": 1.676712989807129, |
| "learning_rate": 4.821952960383649e-05, |
| "loss": 0.1671, |
| "num_input_tokens_seen": 75472, |
| "step": 120 |
| }, |
| { |
| "epoch": 2.192982456140351, |
| "grad_norm": 0.279497355222702, |
| "learning_rate": 4.7924975388280524e-05, |
| "loss": 0.0982, |
| "num_input_tokens_seen": 78480, |
| "step": 125 |
| }, |
| { |
| "epoch": 2.280701754385965, |
| "grad_norm": 0.7541007399559021, |
| "learning_rate": 4.760892901743944e-05, |
| "loss": 0.1281, |
| "num_input_tokens_seen": 81840, |
| "step": 130 |
| }, |
| { |
| "epoch": 2.3684210526315788, |
| "grad_norm": 2.5743348598480225, |
| "learning_rate": 4.727168678465988e-05, |
| "loss": 0.256, |
| "num_input_tokens_seen": 85456, |
| "step": 135 |
| }, |
| { |
| "epoch": 2.456140350877193, |
| "grad_norm": 0.3511086404323578, |
| "learning_rate": 4.6913564854400595e-05, |
| "loss": 0.4613, |
| "num_input_tokens_seen": 88304, |
| "step": 140 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "grad_norm": 1.0270861387252808, |
| "learning_rate": 4.6534898965828405e-05, |
| "loss": 0.2032, |
| "num_input_tokens_seen": 91216, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "eval_loss": 0.12335850298404694, |
| "eval_runtime": 0.6637, |
| "eval_samples_per_second": 37.666, |
| "eval_steps_per_second": 10.546, |
| "num_input_tokens_seen": 91216, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.6315789473684212, |
| "grad_norm": 1.3082849979400635, |
| "learning_rate": 4.613604411806285e-05, |
| "loss": 0.1059, |
| "num_input_tokens_seen": 94512, |
| "step": 150 |
| }, |
| { |
| "epoch": 2.719298245614035, |
| "grad_norm": 0.2827344238758087, |
| "learning_rate": 4.5717374237364665e-05, |
| "loss": 0.1618, |
| "num_input_tokens_seen": 98000, |
| "step": 155 |
| }, |
| { |
| "epoch": 2.807017543859649, |
| "grad_norm": 0.5241649150848389, |
| "learning_rate": 4.5279281826580056e-05, |
| "loss": 0.2007, |
| "num_input_tokens_seen": 101232, |
| "step": 160 |
| }, |
| { |
| "epoch": 2.8947368421052633, |
| "grad_norm": 1.066519856452942, |
| "learning_rate": 4.482217759716946e-05, |
| "loss": 0.1269, |
| "num_input_tokens_seen": 104752, |
| "step": 165 |
| }, |
| { |
| "epoch": 2.982456140350877, |
| "grad_norm": 0.659456729888916, |
| "learning_rate": 4.434649008416565e-05, |
| "loss": 0.1273, |
| "num_input_tokens_seen": 108080, |
| "step": 170 |
| }, |
| { |
| "epoch": 3.0526315789473686, |
| "eval_loss": 0.12071878463029861, |
| "eval_runtime": 0.6642, |
| "eval_samples_per_second": 37.638, |
| "eval_steps_per_second": 10.539, |
| "num_input_tokens_seen": 110696, |
| "step": 174 |
| }, |
| { |
| "epoch": 3.0701754385964914, |
| "grad_norm": 1.3794559240341187, |
| "learning_rate": 4.385266524442241e-05, |
| "loss": 0.5929, |
| "num_input_tokens_seen": 111464, |
| "step": 175 |
| }, |
| { |
| "epoch": 3.1578947368421053, |
| "grad_norm": 0.23271651566028595, |
| "learning_rate": 4.334116603853007e-05, |
| "loss": 0.1888, |
| "num_input_tokens_seen": 114056, |
| "step": 180 |
| }, |
| { |
| "epoch": 3.245614035087719, |
| "grad_norm": 0.8489636182785034, |
| "learning_rate": 4.2812471996790206e-05, |
| "loss": 0.1897, |
| "num_input_tokens_seen": 116872, |
| "step": 185 |
| }, |
| { |
| "epoch": 3.3333333333333335, |
| "grad_norm": 1.2945408821105957, |
| "learning_rate": 4.226707876965611e-05, |
| "loss": 0.1582, |
| "num_input_tokens_seen": 120424, |
| "step": 190 |
| }, |
| { |
| "epoch": 3.4210526315789473, |
| "grad_norm": 3.2085044384002686, |
| "learning_rate": 4.1705497663060767e-05, |
| "loss": 0.1476, |
| "num_input_tokens_seen": 124008, |
| "step": 195 |
| }, |
| { |
| "epoch": 3.5087719298245617, |
| "grad_norm": 0.21600987017154694, |
| "learning_rate": 4.1128255159067665e-05, |
| "loss": 0.2137, |
| "num_input_tokens_seen": 127496, |
| "step": 200 |
| }, |
| { |
| "epoch": 3.56140350877193, |
| "eval_loss": 0.1199546679854393, |
| "eval_runtime": 0.6641, |
| "eval_samples_per_second": 37.647, |
| "eval_steps_per_second": 10.541, |
| "num_input_tokens_seen": 129448, |
| "step": 203 |
| }, |
| { |
| "epoch": 3.5964912280701755, |
| "grad_norm": 0.7123563289642334, |
| "learning_rate": 4.053589242229412e-05, |
| "loss": 0.1239, |
| "num_input_tokens_seen": 130888, |
| "step": 205 |
| }, |
| { |
| "epoch": 3.6842105263157894, |
| "grad_norm": 2.729929208755493, |
| "learning_rate": 3.9928964792569655e-05, |
| "loss": 0.2573, |
| "num_input_tokens_seen": 133896, |
| "step": 210 |
| }, |
| { |
| "epoch": 3.7719298245614032, |
| "grad_norm": 0.5488488674163818, |
| "learning_rate": 3.930804126430513e-05, |
| "loss": 0.141, |
| "num_input_tokens_seen": 137160, |
| "step": 215 |
| }, |
| { |
| "epoch": 3.8596491228070176, |
| "grad_norm": 0.8103274703025818, |
| "learning_rate": 3.867370395306068e-05, |
| "loss": 0.1857, |
| "num_input_tokens_seen": 140328, |
| "step": 220 |
| }, |
| { |
| "epoch": 3.9473684210526314, |
| "grad_norm": 0.6853814721107483, |
| "learning_rate": 3.8026547549812665e-05, |
| "loss": 0.0602, |
| "num_input_tokens_seen": 143368, |
| "step": 225 |
| }, |
| { |
| "epoch": 4.035087719298246, |
| "grad_norm": 2.4486083984375, |
| "learning_rate": 3.736717876343106e-05, |
| "loss": 0.1925, |
| "num_input_tokens_seen": 145832, |
| "step": 230 |
| }, |
| { |
| "epoch": 4.0701754385964914, |
| "eval_loss": 0.11746034771203995, |
| "eval_runtime": 0.6677, |
| "eval_samples_per_second": 37.444, |
| "eval_steps_per_second": 10.484, |
| "num_input_tokens_seen": 147176, |
| "step": 232 |
| }, |
| { |
| "epoch": 4.12280701754386, |
| "grad_norm": 3.295849084854126, |
| "learning_rate": 3.66962157518902e-05, |
| "loss": 0.2333, |
| "num_input_tokens_seen": 148712, |
| "step": 235 |
| }, |
| { |
| "epoch": 4.2105263157894735, |
| "grad_norm": 0.4772413969039917, |
| "learning_rate": 3.601428754274584e-05, |
| "loss": 0.1303, |
| "num_input_tokens_seen": 151752, |
| "step": 240 |
| }, |
| { |
| "epoch": 4.298245614035087, |
| "grad_norm": 0.4736262261867523, |
| "learning_rate": 3.532203344342212e-05, |
| "loss": 0.1095, |
| "num_input_tokens_seen": 154760, |
| "step": 245 |
| }, |
| { |
| "epoch": 4.385964912280702, |
| "grad_norm": 0.5700939893722534, |
| "learning_rate": 3.4620102441861143e-05, |
| "loss": 0.2019, |
| "num_input_tokens_seen": 157896, |
| "step": 250 |
| }, |
| { |
| "epoch": 4.473684210526316, |
| "grad_norm": 0.14286300539970398, |
| "learning_rate": 3.390915259809696e-05, |
| "loss": 0.1429, |
| "num_input_tokens_seen": 160520, |
| "step": 255 |
| }, |
| { |
| "epoch": 4.56140350877193, |
| "grad_norm": 0.5493804216384888, |
| "learning_rate": 3.318985042732461e-05, |
| "loss": 0.3175, |
| "num_input_tokens_seen": 163880, |
| "step": 260 |
| }, |
| { |
| "epoch": 4.578947368421053, |
| "eval_loss": 0.11846113950014114, |
| "eval_runtime": 0.6615, |
| "eval_samples_per_second": 37.792, |
| "eval_steps_per_second": 10.582, |
| "num_input_tokens_seen": 164424, |
| "step": 261 |
| }, |
| { |
| "epoch": 4.649122807017544, |
| "grad_norm": 0.2676703631877899, |
| "learning_rate": 3.246287027504237e-05, |
| "loss": 0.1984, |
| "num_input_tokens_seen": 167464, |
| "step": 265 |
| }, |
| { |
| "epoch": 4.7368421052631575, |
| "grad_norm": 1.3445910215377808, |
| "learning_rate": 3.172889368485311e-05, |
| "loss": 0.0674, |
| "num_input_tokens_seen": 170792, |
| "step": 270 |
| }, |
| { |
| "epoch": 4.824561403508772, |
| "grad_norm": 0.6961261034011841, |
| "learning_rate": 3.0988608759517475e-05, |
| "loss": 0.086, |
| "num_input_tokens_seen": 173480, |
| "step": 275 |
| }, |
| { |
| "epoch": 4.912280701754386, |
| "grad_norm": 0.2509939670562744, |
| "learning_rate": 3.0242709515857758e-05, |
| "loss": 0.086, |
| "num_input_tokens_seen": 177544, |
| "step": 280 |
| }, |
| { |
| "epoch": 5.0, |
| "grad_norm": 0.03989564999938011, |
| "learning_rate": 2.949189523411747e-05, |
| "loss": 0.1034, |
| "num_input_tokens_seen": 180504, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "grad_norm": 0.6623712182044983, |
| "learning_rate": 2.8736869802386364e-05, |
| "loss": 0.0705, |
| "num_input_tokens_seen": 183416, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "eval_loss": 0.11734068393707275, |
| "eval_runtime": 0.6679, |
| "eval_samples_per_second": 37.433, |
| "eval_steps_per_second": 10.481, |
| "num_input_tokens_seen": 183416, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.175438596491228, |
| "grad_norm": 0.5438898801803589, |
| "learning_rate": 2.797834105670559e-05, |
| "loss": 0.1078, |
| "num_input_tokens_seen": 186712, |
| "step": 295 |
| }, |
| { |
| "epoch": 5.2631578947368425, |
| "grad_norm": 0.7463061213493347, |
| "learning_rate": 2.7217020117471793e-05, |
| "loss": 0.1354, |
| "num_input_tokens_seen": 189912, |
| "step": 300 |
| }, |
| { |
| "epoch": 5.350877192982456, |
| "grad_norm": 0.3271144926548004, |
| "learning_rate": 2.6453620722761896e-05, |
| "loss": 0.1104, |
| "num_input_tokens_seen": 193624, |
| "step": 305 |
| }, |
| { |
| "epoch": 5.43859649122807, |
| "grad_norm": 0.3201896846294403, |
| "learning_rate": 2.5688858559204053e-05, |
| "loss": 0.0781, |
| "num_input_tokens_seen": 197720, |
| "step": 310 |
| }, |
| { |
| "epoch": 5.526315789473684, |
| "grad_norm": 0.5247319340705872, |
| "learning_rate": 2.492345059102164e-05, |
| "loss": 0.3848, |
| "num_input_tokens_seen": 200856, |
| "step": 315 |
| }, |
| { |
| "epoch": 5.5964912280701755, |
| "eval_loss": 0.11480677127838135, |
| "eval_runtime": 0.6649, |
| "eval_samples_per_second": 37.6, |
| "eval_steps_per_second": 10.528, |
| "num_input_tokens_seen": 203256, |
| "step": 319 |
| }, |
| { |
| "epoch": 5.614035087719298, |
| "grad_norm": 1.0066843032836914, |
| "learning_rate": 2.4158114387879616e-05, |
| "loss": 0.1725, |
| "num_input_tokens_seen": 203800, |
| "step": 320 |
| }, |
| { |
| "epoch": 5.701754385964913, |
| "grad_norm": 2.24448823928833, |
| "learning_rate": 2.3393567452163252e-05, |
| "loss": 0.2426, |
| "num_input_tokens_seen": 207064, |
| "step": 325 |
| }, |
| { |
| "epoch": 5.7894736842105265, |
| "grad_norm": 1.241878867149353, |
| "learning_rate": 2.2630526546319914e-05, |
| "loss": 0.1561, |
| "num_input_tokens_seen": 210168, |
| "step": 330 |
| }, |
| { |
| "epoch": 5.87719298245614, |
| "grad_norm": 0.32216542959213257, |
| "learning_rate": 2.186970702089457e-05, |
| "loss": 0.0768, |
| "num_input_tokens_seen": 213080, |
| "step": 335 |
| }, |
| { |
| "epoch": 5.964912280701754, |
| "grad_norm": 0.26279038190841675, |
| "learning_rate": 2.111182214388893e-05, |
| "loss": 0.1292, |
| "num_input_tokens_seen": 216152, |
| "step": 340 |
| }, |
| { |
| "epoch": 6.052631578947368, |
| "grad_norm": 0.7196716070175171, |
| "learning_rate": 2.0357582432072957e-05, |
| "loss": 0.0606, |
| "num_input_tokens_seen": 218736, |
| "step": 345 |
| }, |
| { |
| "epoch": 6.105263157894737, |
| "eval_loss": 0.11378468573093414, |
| "eval_runtime": 0.662, |
| "eval_samples_per_second": 37.762, |
| "eval_steps_per_second": 10.573, |
| "num_input_tokens_seen": 220912, |
| "step": 348 |
| }, |
| { |
| "epoch": 6.140350877192983, |
| "grad_norm": 0.293470561504364, |
| "learning_rate": 1.9607694984875754e-05, |
| "loss": 0.2255, |
| "num_input_tokens_seen": 222160, |
| "step": 350 |
| }, |
| { |
| "epoch": 6.228070175438597, |
| "grad_norm": 0.7847577333450317, |
| "learning_rate": 1.8862862821480025e-05, |
| "loss": 0.2139, |
| "num_input_tokens_seen": 225904, |
| "step": 355 |
| }, |
| { |
| "epoch": 6.315789473684211, |
| "grad_norm": 0.18969027698040009, |
| "learning_rate": 1.8123784221741964e-05, |
| "loss": 0.111, |
| "num_input_tokens_seen": 228912, |
| "step": 360 |
| }, |
| { |
| "epoch": 6.4035087719298245, |
| "grad_norm": 1.7946451902389526, |
| "learning_rate": 1.73911520715541e-05, |
| "loss": 0.2102, |
| "num_input_tokens_seen": 232496, |
| "step": 365 |
| }, |
| { |
| "epoch": 6.491228070175438, |
| "grad_norm": 1.3251816034317017, |
| "learning_rate": 1.666565321326512e-05, |
| "loss": 0.1769, |
| "num_input_tokens_seen": 235792, |
| "step": 370 |
| }, |
| { |
| "epoch": 6.578947368421053, |
| "grad_norm": 1.340169072151184, |
| "learning_rate": 1.5947967801765345e-05, |
| "loss": 0.122, |
| "num_input_tokens_seen": 238960, |
| "step": 375 |
| }, |
| { |
| "epoch": 6.614035087719298, |
| "eval_loss": 0.11551959812641144, |
| "eval_runtime": 0.6604, |
| "eval_samples_per_second": 37.854, |
| "eval_steps_per_second": 10.599, |
| "num_input_tokens_seen": 240336, |
| "step": 377 |
| }, |
| { |
| "epoch": 6.666666666666667, |
| "grad_norm": 1.2249866724014282, |
| "learning_rate": 1.5238768666841907e-05, |
| "loss": 0.1548, |
| "num_input_tokens_seen": 242064, |
| "step": 380 |
| }, |
| { |
| "epoch": 6.754385964912281, |
| "grad_norm": 0.2892046868801117, |
| "learning_rate": 1.4538720682400969e-05, |
| "loss": 0.167, |
| "num_input_tokens_seen": 245328, |
| "step": 385 |
| }, |
| { |
| "epoch": 6.842105263157895, |
| "grad_norm": 1.0948004722595215, |
| "learning_rate": 1.3848480143148839e-05, |
| "loss": 0.1475, |
| "num_input_tokens_seen": 248144, |
| "step": 390 |
| }, |
| { |
| "epoch": 6.9298245614035086, |
| "grad_norm": 0.28923431038856506, |
| "learning_rate": 1.3168694149315796e-05, |
| "loss": 0.0546, |
| "num_input_tokens_seen": 250928, |
| "step": 395 |
| }, |
| { |
| "epoch": 7.017543859649122, |
| "grad_norm": 0.15149898827075958, |
| "learning_rate": 1.2500000000000006e-05, |
| "loss": 0.0423, |
| "num_input_tokens_seen": 254168, |
| "step": 400 |
| }, |
| { |
| "epoch": 7.105263157894737, |
| "grad_norm": 0.542259693145752, |
| "learning_rate": 1.1843024595699805e-05, |
| "loss": 0.1701, |
| "num_input_tokens_seen": 257080, |
| "step": 405 |
| }, |
| { |
| "epoch": 7.12280701754386, |
| "eval_loss": 0.11475398391485214, |
| "eval_runtime": 0.6622, |
| "eval_samples_per_second": 37.751, |
| "eval_steps_per_second": 10.57, |
| "num_input_tokens_seen": 257848, |
| "step": 406 |
| }, |
| { |
| "epoch": 7.192982456140351, |
| "grad_norm": 0.1998886913061142, |
| "learning_rate": 1.1198383850594758e-05, |
| "loss": 0.0453, |
| "num_input_tokens_seen": 260696, |
| "step": 410 |
| }, |
| { |
| "epoch": 7.280701754385965, |
| "grad_norm": 0.1344331055879593, |
| "learning_rate": 1.0566682115126344e-05, |
| "loss": 0.0671, |
| "num_input_tokens_seen": 263672, |
| "step": 415 |
| }, |
| { |
| "epoch": 7.368421052631579, |
| "grad_norm": 2.692500114440918, |
| "learning_rate": 9.948511609419675e-06, |
| "loss": 0.3644, |
| "num_input_tokens_seen": 266296, |
| "step": 420 |
| }, |
| { |
| "epoch": 7.456140350877193, |
| "grad_norm": 0.2710326313972473, |
| "learning_rate": 9.344451868077353e-06, |
| "loss": 0.0949, |
| "num_input_tokens_seen": 270200, |
| "step": 425 |
| }, |
| { |
| "epoch": 7.543859649122807, |
| "grad_norm": 0.181783989071846, |
| "learning_rate": 8.755069196866014e-06, |
| "loss": 0.0917, |
| "num_input_tokens_seen": 273624, |
| "step": 430 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "grad_norm": 0.4586850106716156, |
| "learning_rate": 8.180916141804906e-06, |
| "loss": 0.1441, |
| "num_input_tokens_seen": 276824, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "eval_loss": 0.11607818305492401, |
| "eval_runtime": 0.6624, |
| "eval_samples_per_second": 37.743, |
| "eval_steps_per_second": 10.568, |
| "num_input_tokens_seen": 276824, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.719298245614035, |
| "grad_norm": 0.1673639714717865, |
| "learning_rate": 7.622530971154199e-06, |
| "loss": 0.0676, |
| "num_input_tokens_seen": 279896, |
| "step": 440 |
| }, |
| { |
| "epoch": 7.807017543859649, |
| "grad_norm": 0.3598438799381256, |
| "learning_rate": 7.080437170788723e-06, |
| "loss": 0.0562, |
| "num_input_tokens_seen": 282872, |
| "step": 445 |
| }, |
| { |
| "epoch": 7.894736842105263, |
| "grad_norm": 1.1647834777832031, |
| "learning_rate": 6.555142953430158e-06, |
| "loss": 0.1745, |
| "num_input_tokens_seen": 286456, |
| "step": 450 |
| }, |
| { |
| "epoch": 7.982456140350877, |
| "grad_norm": 1.2443015575408936, |
| "learning_rate": 6.0471407821978135e-06, |
| "loss": 0.3308, |
| "num_input_tokens_seen": 289208, |
| "step": 455 |
| }, |
| { |
| "epoch": 8.070175438596491, |
| "grad_norm": 0.10643043369054794, |
| "learning_rate": 5.556906908924655e-06, |
| "loss": 0.1956, |
| "num_input_tokens_seen": 291944, |
| "step": 460 |
| }, |
| { |
| "epoch": 8.140350877192983, |
| "eval_loss": 0.11502133309841156, |
| "eval_runtime": 0.6654, |
| "eval_samples_per_second": 37.574, |
| "eval_steps_per_second": 10.521, |
| "num_input_tokens_seen": 294504, |
| "step": 464 |
| }, |
| { |
| "epoch": 8.157894736842104, |
| "grad_norm": 0.7863829731941223, |
| "learning_rate": 5.084900927671393e-06, |
| "loss": 0.0914, |
| "num_input_tokens_seen": 295400, |
| "step": 465 |
| }, |
| { |
| "epoch": 8.24561403508772, |
| "grad_norm": 1.1285400390625, |
| "learning_rate": 4.631565343857239e-06, |
| "loss": 0.1381, |
| "num_input_tokens_seen": 298792, |
| "step": 470 |
| }, |
| { |
| "epoch": 8.333333333333334, |
| "grad_norm": 0.42564141750335693, |
| "learning_rate": 4.19732515941125e-06, |
| "loss": 0.1202, |
| "num_input_tokens_seen": 302024, |
| "step": 475 |
| }, |
| { |
| "epoch": 8.421052631578947, |
| "grad_norm": 1.291963815689087, |
| "learning_rate": 3.7825874743331907e-06, |
| "loss": 0.1529, |
| "num_input_tokens_seen": 305352, |
| "step": 480 |
| }, |
| { |
| "epoch": 8.508771929824562, |
| "grad_norm": 0.7801548838615417, |
| "learning_rate": 3.3877411050374424e-06, |
| "loss": 0.1167, |
| "num_input_tokens_seen": 308072, |
| "step": 485 |
| }, |
| { |
| "epoch": 8.596491228070175, |
| "grad_norm": 0.6383867859840393, |
| "learning_rate": 3.013156219837776e-06, |
| "loss": 0.1042, |
| "num_input_tokens_seen": 311144, |
| "step": 490 |
| }, |
| { |
| "epoch": 8.649122807017545, |
| "eval_loss": 0.11202231794595718, |
| "eval_runtime": 0.6618, |
| "eval_samples_per_second": 37.776, |
| "eval_steps_per_second": 10.577, |
| "num_input_tokens_seen": 313576, |
| "step": 493 |
| }, |
| { |
| "epoch": 8.68421052631579, |
| "grad_norm": 1.4384206533432007, |
| "learning_rate": 2.659183991914696e-06, |
| "loss": 0.2703, |
| "num_input_tokens_seen": 314504, |
| "step": 495 |
| }, |
| { |
| "epoch": 8.771929824561404, |
| "grad_norm": 0.15518629550933838, |
| "learning_rate": 2.326156270090735e-06, |
| "loss": 0.1404, |
| "num_input_tokens_seen": 317736, |
| "step": 500 |
| }, |
| { |
| "epoch": 8.859649122807017, |
| "grad_norm": 0.5462822318077087, |
| "learning_rate": 2.0143852677223075e-06, |
| "loss": 0.0746, |
| "num_input_tokens_seen": 321640, |
| "step": 505 |
| }, |
| { |
| "epoch": 8.947368421052632, |
| "grad_norm": 0.5983424782752991, |
| "learning_rate": 1.7241632699998123e-06, |
| "loss": 0.0659, |
| "num_input_tokens_seen": 324552, |
| "step": 510 |
| }, |
| { |
| "epoch": 9.035087719298245, |
| "grad_norm": 1.2708483934402466, |
| "learning_rate": 1.4557623599303903e-06, |
| "loss": 0.2284, |
| "num_input_tokens_seen": 327328, |
| "step": 515 |
| }, |
| { |
| "epoch": 9.12280701754386, |
| "grad_norm": 0.12775298953056335, |
| "learning_rate": 1.2094341632602064e-06, |
| "loss": 0.0331, |
| "num_input_tokens_seen": 330592, |
| "step": 520 |
| }, |
| { |
| "epoch": 9.157894736842104, |
| "eval_loss": 0.11720363795757294, |
| "eval_runtime": 0.6626, |
| "eval_samples_per_second": 37.727, |
| "eval_steps_per_second": 10.564, |
| "num_input_tokens_seen": 332256, |
| "step": 522 |
| }, |
| { |
| "epoch": 9.210526315789474, |
| "grad_norm": 1.174777626991272, |
| "learning_rate": 9.85409612575411e-07, |
| "loss": 0.1214, |
| "num_input_tokens_seen": 334080, |
| "step": 525 |
| }, |
| { |
| "epoch": 9.298245614035087, |
| "grad_norm": 0.0826413705945015, |
| "learning_rate": 7.838987308029427e-07, |
| "loss": 0.1407, |
| "num_input_tokens_seen": 336928, |
| "step": 530 |
| }, |
| { |
| "epoch": 9.385964912280702, |
| "grad_norm": 0.3035958409309387, |
| "learning_rate": 6.050904343141095e-07, |
| "loss": 0.0595, |
| "num_input_tokens_seen": 340160, |
| "step": 535 |
| }, |
| { |
| "epoch": 9.473684210526315, |
| "grad_norm": 1.1427686214447021, |
| "learning_rate": 4.491523558155714e-07, |
| "loss": 0.2076, |
| "num_input_tokens_seen": 343424, |
| "step": 540 |
| }, |
| { |
| "epoch": 9.56140350877193, |
| "grad_norm": 0.5204566121101379, |
| "learning_rate": 3.162306871937387e-07, |
| "loss": 0.1041, |
| "num_input_tokens_seen": 346816, |
| "step": 545 |
| }, |
| { |
| "epoch": 9.649122807017545, |
| "grad_norm": 1.046081304550171, |
| "learning_rate": 2.064500424599436e-07, |
| "loss": 0.4177, |
| "num_input_tokens_seen": 349376, |
| "step": 550 |
| }, |
| { |
| "epoch": 9.666666666666666, |
| "eval_loss": 0.11319712549448013, |
| "eval_runtime": 0.6668, |
| "eval_samples_per_second": 37.49, |
| "eval_steps_per_second": 10.497, |
| "num_input_tokens_seen": 350336, |
| "step": 551 |
| }, |
| { |
| "epoch": 9.736842105263158, |
| "grad_norm": 0.9440933465957642, |
| "learning_rate": 1.1991334092484318e-07, |
| "loss": 0.0854, |
| "num_input_tokens_seen": 353152, |
| "step": 555 |
| }, |
| { |
| "epoch": 9.824561403508772, |
| "grad_norm": 0.09958238154649734, |
| "learning_rate": 5.6701710711626334e-08, |
| "loss": 0.04, |
| "num_input_tokens_seen": 355904, |
| "step": 560 |
| }, |
| { |
| "epoch": 9.912280701754385, |
| "grad_norm": 0.2486177533864975, |
| "learning_rate": 1.6874412698408836e-08, |
| "loss": 0.1639, |
| "num_input_tokens_seen": 358912, |
| "step": 565 |
| }, |
| { |
| "epoch": 10.0, |
| "grad_norm": 1.0762845277786255, |
| "learning_rate": 4.687849611939576e-10, |
| "loss": 0.1488, |
| "num_input_tokens_seen": 361992, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.0, |
| "num_input_tokens_seen": 361992, |
| "step": 570, |
| "total_flos": 1.630076317433856e+16, |
| "train_loss": 0.3158912919853863, |
| "train_runtime": 154.4855, |
| "train_samples_per_second": 14.564, |
| "train_steps_per_second": 3.69 |
| } |
| ], |
| "logging_steps": 5, |
| "max_steps": 570, |
| "num_input_tokens_seen": 361992, |
| "num_train_epochs": 10, |
| "save_steps": 29, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 1.630076317433856e+16, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |