{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9954545454545455,
  "eval_steps": 500,
  "global_step": 146,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006818181818181818,
      "grad_norm": 2.020231246948242,
      "learning_rate": 5.0000000000000004e-08,
      "loss": 0.8736,
      "step": 1
    },
    {
      "epoch": 0.013636363636363636,
      "grad_norm": 1.9368854761123657,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 0.8543,
      "step": 2
    },
    {
      "epoch": 0.020454545454545454,
      "grad_norm": 1.908379077911377,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 0.8339,
      "step": 3
    },
    {
      "epoch": 0.02727272727272727,
      "grad_norm": 1.9313658475875854,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 0.834,
      "step": 4
    },
    {
      "epoch": 0.03409090909090909,
      "grad_norm": 1.983262062072754,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 0.8726,
      "step": 5
    },
    {
      "epoch": 0.04090909090909091,
      "grad_norm": 1.9994115829467773,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 0.8135,
      "step": 6
    },
    {
      "epoch": 0.04772727272727273,
      "grad_norm": 1.8889026641845703,
      "learning_rate": 3.5000000000000004e-07,
      "loss": 0.8439,
      "step": 7
    },
    {
      "epoch": 0.05454545454545454,
      "grad_norm": 1.9258317947387695,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 0.84,
      "step": 8
    },
    {
      "epoch": 0.06136363636363636,
      "grad_norm": 1.9654443264007568,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 0.8609,
      "step": 9
    },
    {
      "epoch": 0.06818181818181818,
      "grad_norm": 1.9278813600540161,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.8515,
      "step": 10
    },
    {
      "epoch": 0.075,
      "grad_norm": 1.8698863983154297,
      "learning_rate": 5.5e-07,
      "loss": 0.8632,
      "step": 11
    },
    {
      "epoch": 0.08181818181818182,
      "grad_norm": 2.7078285217285156,
      "learning_rate": 6.000000000000001e-07,
      "loss": 0.8643,
      "step": 12
    },
    {
      "epoch": 0.08863636363636364,
      "grad_norm": 1.959272027015686,
      "learning_rate": 6.5e-07,
      "loss": 0.861,
      "step": 13
    },
    {
      "epoch": 0.09545454545454546,
      "grad_norm": 1.8952852487564087,
      "learning_rate": 7.000000000000001e-07,
      "loss": 0.8599,
      "step": 14
    },
    {
      "epoch": 0.10227272727272728,
      "grad_norm": 1.9469462633132935,
      "learning_rate": 7.5e-07,
      "loss": 0.8467,
      "step": 15
    },
    {
      "epoch": 0.10909090909090909,
      "grad_norm": 2.0111300945281982,
      "learning_rate": 8.000000000000001e-07,
      "loss": 0.8078,
      "step": 16
    },
    {
      "epoch": 0.1159090909090909,
      "grad_norm": 1.739009141921997,
      "learning_rate": 8.500000000000001e-07,
      "loss": 0.8114,
      "step": 17
    },
    {
      "epoch": 0.12272727272727273,
      "grad_norm": 1.7578809261322021,
      "learning_rate": 9.000000000000001e-07,
      "loss": 0.8489,
      "step": 18
    },
    {
      "epoch": 0.12954545454545455,
      "grad_norm": 1.9787132740020752,
      "learning_rate": 9.500000000000001e-07,
      "loss": 0.8052,
      "step": 19
    },
    {
      "epoch": 0.13636363636363635,
      "grad_norm": 1.6380164623260498,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.7882,
      "step": 20
    },
    {
      "epoch": 0.1431818181818182,
      "grad_norm": 1.686877727508545,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 0.8126,
      "step": 21
    },
    {
      "epoch": 0.15,
      "grad_norm": 1.690590739250183,
      "learning_rate": 1.1e-06,
      "loss": 0.8248,
      "step": 22
    },
    {
      "epoch": 0.15681818181818183,
      "grad_norm": 1.612255334854126,
      "learning_rate": 1.1500000000000002e-06,
      "loss": 0.8024,
      "step": 23
    },
    {
      "epoch": 0.16363636363636364,
      "grad_norm": 1.6669739484786987,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 0.818,
      "step": 24
    },
    {
      "epoch": 0.17045454545454544,
      "grad_norm": 1.7726502418518066,
      "learning_rate": 1.25e-06,
      "loss": 0.8158,
      "step": 25
    },
    {
      "epoch": 0.17727272727272728,
      "grad_norm": 1.5930036306381226,
      "learning_rate": 1.3e-06,
      "loss": 0.7886,
      "step": 26
    },
    {
      "epoch": 0.18409090909090908,
      "grad_norm": 1.5480856895446777,
      "learning_rate": 1.3500000000000002e-06,
      "loss": 0.7747,
      "step": 27
    },
    {
      "epoch": 0.19090909090909092,
      "grad_norm": 1.3537116050720215,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.7891,
      "step": 28
    },
    {
      "epoch": 0.19772727272727272,
      "grad_norm": 1.3412829637527466,
      "learning_rate": 1.45e-06,
      "loss": 0.7799,
      "step": 29
    },
    {
      "epoch": 0.20454545454545456,
      "grad_norm": 1.237492322921753,
      "learning_rate": 1.5e-06,
      "loss": 0.7768,
      "step": 30
    },
    {
      "epoch": 0.21136363636363636,
      "grad_norm": 1.3467835187911987,
      "learning_rate": 1.5500000000000002e-06,
      "loss": 0.7344,
      "step": 31
    },
    {
      "epoch": 0.21818181818181817,
      "grad_norm": 1.1665314435958862,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.7152,
      "step": 32
    },
    {
      "epoch": 0.225,
      "grad_norm": 1.2461743354797363,
      "learning_rate": 1.6500000000000003e-06,
      "loss": 0.758,
      "step": 33
    },
    {
      "epoch": 0.2318181818181818,
      "grad_norm": 1.0343059301376343,
      "learning_rate": 1.7000000000000002e-06,
      "loss": 0.7362,
      "step": 34
    },
    {
      "epoch": 0.23863636363636365,
      "grad_norm": 1.112117052078247,
      "learning_rate": 1.75e-06,
      "loss": 0.7771,
      "step": 35
    },
    {
      "epoch": 0.24545454545454545,
      "grad_norm": 1.104337453842163,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 0.7479,
      "step": 36
    },
    {
      "epoch": 0.25227272727272726,
      "grad_norm": 0.9324578642845154,
      "learning_rate": 1.85e-06,
      "loss": 0.6936,
      "step": 37
    },
    {
      "epoch": 0.2590909090909091,
      "grad_norm": 1.0733048915863037,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.7302,
      "step": 38
    },
    {
      "epoch": 0.26590909090909093,
      "grad_norm": 0.8614453673362732,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 0.7466,
      "step": 39
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 0.7729659080505371,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.725,
      "step": 40
    },
    {
      "epoch": 0.27954545454545454,
      "grad_norm": 0.9341004490852356,
      "learning_rate": 2.05e-06,
      "loss": 0.6949,
      "step": 41
    },
    {
      "epoch": 0.2863636363636364,
      "grad_norm": 0.6668925881385803,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.6587,
      "step": 42
    },
    {
      "epoch": 0.29318181818181815,
      "grad_norm": 0.681847333908081,
      "learning_rate": 2.15e-06,
      "loss": 0.6984,
      "step": 43
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.6222082376480103,
      "learning_rate": 2.2e-06,
      "loss": 0.7063,
      "step": 44
    },
    {
      "epoch": 0.3068181818181818,
      "grad_norm": 0.790607213973999,
      "learning_rate": 2.25e-06,
      "loss": 0.6804,
      "step": 45
    },
    {
      "epoch": 0.31363636363636366,
      "grad_norm": 0.6110427379608154,
      "learning_rate": 2.3000000000000004e-06,
      "loss": 0.6694,
      "step": 46
    },
    {
      "epoch": 0.32045454545454544,
      "grad_norm": 0.6481993794441223,
      "learning_rate": 2.35e-06,
      "loss": 0.7321,
      "step": 47
    },
    {
      "epoch": 0.32727272727272727,
      "grad_norm": 0.5973438620567322,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.6508,
      "step": 48
    },
    {
      "epoch": 0.3340909090909091,
      "grad_norm": 0.6252740621566772,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 0.6911,
      "step": 49
    },
    {
      "epoch": 0.3409090909090909,
      "grad_norm": 0.6826481819152832,
      "learning_rate": 2.5e-06,
      "loss": 0.6855,
      "step": 50
    },
    {
      "epoch": 0.3477272727272727,
      "grad_norm": 0.6307975649833679,
      "learning_rate": 2.55e-06,
      "loss": 0.714,
      "step": 51
    },
    {
      "epoch": 0.35454545454545455,
      "grad_norm": 0.5900976061820984,
      "learning_rate": 2.6e-06,
      "loss": 0.6619,
      "step": 52
    },
    {
      "epoch": 0.3613636363636364,
      "grad_norm": 0.6203920841217041,
      "learning_rate": 2.6500000000000005e-06,
      "loss": 0.6822,
      "step": 53
    },
    {
      "epoch": 0.36818181818181817,
      "grad_norm": 0.6197589039802551,
      "learning_rate": 2.7000000000000004e-06,
      "loss": 0.6584,
      "step": 54
    },
    {
      "epoch": 0.375,
      "grad_norm": 0.5921110510826111,
      "learning_rate": 2.7500000000000004e-06,
      "loss": 0.6739,
      "step": 55
    },
    {
      "epoch": 0.38181818181818183,
      "grad_norm": 0.6215619444847107,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.6631,
      "step": 56
    },
    {
      "epoch": 0.3886363636363636,
      "grad_norm": 0.5495648980140686,
      "learning_rate": 2.85e-06,
      "loss": 0.6531,
      "step": 57
    },
    {
      "epoch": 0.39545454545454545,
      "grad_norm": 0.531947910785675,
      "learning_rate": 2.9e-06,
      "loss": 0.6365,
      "step": 58
    },
    {
      "epoch": 0.4022727272727273,
      "grad_norm": 0.559112548828125,
      "learning_rate": 2.95e-06,
      "loss": 0.6391,
      "step": 59
    },
    {
      "epoch": 0.4090909090909091,
      "grad_norm": 0.5377560257911682,
      "learning_rate": 3e-06,
      "loss": 0.6775,
      "step": 60
    },
    {
      "epoch": 0.4159090909090909,
      "grad_norm": 0.5167352557182312,
      "learning_rate": 3.05e-06,
      "loss": 0.6518,
      "step": 61
    },
    {
      "epoch": 0.42272727272727273,
      "grad_norm": 0.5483390092849731,
      "learning_rate": 3.1000000000000004e-06,
      "loss": 0.5972,
      "step": 62
    },
    {
      "epoch": 0.42954545454545456,
      "grad_norm": 0.5687050223350525,
      "learning_rate": 3.1500000000000003e-06,
      "loss": 0.6515,
      "step": 63
    },
    {
      "epoch": 0.43636363636363634,
      "grad_norm": 0.9046968817710876,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.6497,
      "step": 64
    },
    {
      "epoch": 0.4431818181818182,
      "grad_norm": 0.5231667757034302,
      "learning_rate": 3.2500000000000002e-06,
      "loss": 0.6364,
      "step": 65
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.49699342250823975,
      "learning_rate": 3.3000000000000006e-06,
      "loss": 0.6144,
      "step": 66
    },
    {
      "epoch": 0.45681818181818185,
      "grad_norm": 0.5390080213546753,
      "learning_rate": 3.3500000000000005e-06,
      "loss": 0.6051,
      "step": 67
    },
    {
      "epoch": 0.4636363636363636,
      "grad_norm": 0.5252938270568848,
      "learning_rate": 3.4000000000000005e-06,
      "loss": 0.6353,
      "step": 68
    },
    {
      "epoch": 0.47045454545454546,
      "grad_norm": 0.6291780471801758,
      "learning_rate": 3.45e-06,
      "loss": 0.6326,
      "step": 69
    },
    {
      "epoch": 0.4772727272727273,
      "grad_norm": 0.5545375943183899,
      "learning_rate": 3.5e-06,
      "loss": 0.647,
      "step": 70
    },
    {
      "epoch": 0.48409090909090907,
      "grad_norm": 0.47775956988334656,
      "learning_rate": 3.5500000000000003e-06,
      "loss": 0.5993,
      "step": 71
    },
    {
      "epoch": 0.4909090909090909,
      "grad_norm": 0.5016375184059143,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.6056,
      "step": 72
    },
    {
      "epoch": 0.49772727272727274,
      "grad_norm": 0.4780517816543579,
      "learning_rate": 3.65e-06,
      "loss": 0.6189,
      "step": 73
    },
    {
      "epoch": 0.5045454545454545,
      "grad_norm": 0.5763716697692871,
      "learning_rate": 3.7e-06,
      "loss": 0.6262,
      "step": 74
    },
    {
      "epoch": 0.5113636363636364,
      "grad_norm": 0.5321404337882996,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.6336,
      "step": 75
    },
    {
      "epoch": 0.5181818181818182,
      "grad_norm": 0.4641991853713989,
      "learning_rate": 3.8000000000000005e-06,
      "loss": 0.6317,
      "step": 76
    },
    {
      "epoch": 0.525,
      "grad_norm": 0.45118412375450134,
      "learning_rate": 3.85e-06,
      "loss": 0.5927,
      "step": 77
    },
    {
      "epoch": 0.5318181818181819,
      "grad_norm": 0.5500645041465759,
      "learning_rate": 3.900000000000001e-06,
      "loss": 0.62,
      "step": 78
    },
    {
      "epoch": 0.5386363636363637,
      "grad_norm": 0.5197616219520569,
      "learning_rate": 3.95e-06,
      "loss": 0.6063,
      "step": 79
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 0.4672992527484894,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.6156,
      "step": 80
    },
    {
      "epoch": 0.5522727272727272,
      "grad_norm": 0.5995073318481445,
      "learning_rate": 4.05e-06,
      "loss": 0.6219,
      "step": 81
    },
    {
      "epoch": 0.5590909090909091,
      "grad_norm": 0.5593804717063904,
      "learning_rate": 4.1e-06,
      "loss": 0.6282,
      "step": 82
    },
    {
      "epoch": 0.5659090909090909,
      "grad_norm": 0.7544977068901062,
      "learning_rate": 4.15e-06,
      "loss": 0.6246,
      "step": 83
    },
    {
      "epoch": 0.5727272727272728,
      "grad_norm": 0.45378655195236206,
      "learning_rate": 4.2000000000000004e-06,
      "loss": 0.5881,
      "step": 84
    },
    {
      "epoch": 0.5795454545454546,
      "grad_norm": 0.4407907724380493,
      "learning_rate": 4.25e-06,
      "loss": 0.6026,
      "step": 85
    },
    {
      "epoch": 0.5863636363636363,
      "grad_norm": 0.4333001673221588,
      "learning_rate": 4.3e-06,
      "loss": 0.6098,
      "step": 86
    },
    {
      "epoch": 0.5931818181818181,
      "grad_norm": 0.45020997524261475,
      "learning_rate": 4.350000000000001e-06,
      "loss": 0.5858,
      "step": 87
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.424003541469574,
      "learning_rate": 4.4e-06,
      "loss": 0.6186,
      "step": 88
    },
    {
      "epoch": 0.6068181818181818,
      "grad_norm": 0.427081435918808,
      "learning_rate": 4.450000000000001e-06,
      "loss": 0.6164,
      "step": 89
    },
    {
      "epoch": 0.6136363636363636,
      "grad_norm": 0.4430546164512634,
      "learning_rate": 4.5e-06,
      "loss": 0.6049,
      "step": 90
    },
    {
      "epoch": 0.6204545454545455,
      "grad_norm": 0.5469494462013245,
      "learning_rate": 4.5500000000000005e-06,
      "loss": 0.608,
      "step": 91
    },
    {
      "epoch": 0.6272727272727273,
      "grad_norm": 0.4481559693813324,
      "learning_rate": 4.600000000000001e-06,
      "loss": 0.5532,
      "step": 92
    },
    {
      "epoch": 0.634090909090909,
      "grad_norm": 0.6563135981559753,
      "learning_rate": 4.65e-06,
      "loss": 0.6105,
      "step": 93
    },
    {
      "epoch": 0.6409090909090909,
      "grad_norm": 0.4572807252407074,
      "learning_rate": 4.7e-06,
      "loss": 0.5976,
      "step": 94
    },
    {
      "epoch": 0.6477272727272727,
      "grad_norm": 0.4423324167728424,
      "learning_rate": 4.75e-06,
      "loss": 0.616,
      "step": 95
    },
    {
      "epoch": 0.6545454545454545,
      "grad_norm": 0.6230632066726685,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.5879,
      "step": 96
    },
    {
      "epoch": 0.6613636363636364,
      "grad_norm": 0.461467981338501,
      "learning_rate": 4.85e-06,
      "loss": 0.5992,
      "step": 97
    },
    {
      "epoch": 0.6681818181818182,
      "grad_norm": 0.44455233216285706,
      "learning_rate": 4.9000000000000005e-06,
      "loss": 0.6138,
      "step": 98
    },
    {
      "epoch": 0.675,
      "grad_norm": 0.47281715273857117,
      "learning_rate": 4.95e-06,
      "loss": 0.59,
      "step": 99
    },
    {
      "epoch": 0.6818181818181818,
      "grad_norm": 0.46114516258239746,
      "learning_rate": 5e-06,
      "loss": 0.6216,
      "step": 100
    },
    {
      "epoch": 0.6886363636363636,
      "grad_norm": 0.49900761246681213,
      "learning_rate": 4.9999795126530275e-06,
      "loss": 0.6222,
      "step": 101
    },
    {
      "epoch": 0.6954545454545454,
      "grad_norm": 0.4764839708805084,
      "learning_rate": 4.999918050947891e-06,
      "loss": 0.5829,
      "step": 102
    },
    {
      "epoch": 0.7022727272727273,
      "grad_norm": 0.43502742052078247,
      "learning_rate": 4.999815615891943e-06,
      "loss": 0.5982,
      "step": 103
    },
    {
      "epoch": 0.7090909090909091,
      "grad_norm": 0.43318599462509155,
      "learning_rate": 4.9996722091640805e-06,
      "loss": 0.5827,
      "step": 104
    },
    {
      "epoch": 0.7159090909090909,
      "grad_norm": 0.47218361496925354,
      "learning_rate": 4.9994878331147225e-06,
      "loss": 0.5907,
      "step": 105
    },
    {
      "epoch": 0.7227272727272728,
      "grad_norm": 0.48492228984832764,
      "learning_rate": 4.99926249076577e-06,
      "loss": 0.5459,
      "step": 106
    },
    {
      "epoch": 0.7295454545454545,
      "grad_norm": 0.4019850194454193,
      "learning_rate": 4.998996185810557e-06,
      "loss": 0.5957,
      "step": 107
    },
    {
      "epoch": 0.7363636363636363,
      "grad_norm": 0.47361984848976135,
      "learning_rate": 4.998688922613788e-06,
      "loss": 0.579,
      "step": 108
    },
    {
      "epoch": 0.7431818181818182,
      "grad_norm": 0.4612269401550293,
      "learning_rate": 4.9983407062114695e-06,
      "loss": 0.5814,
      "step": 109
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.45558398962020874,
      "learning_rate": 4.9979515423108255e-06,
      "loss": 0.6036,
      "step": 110
    },
    {
      "epoch": 0.7568181818181818,
      "grad_norm": 0.41492199897766113,
      "learning_rate": 4.997521437290205e-06,
      "loss": 0.5891,
      "step": 111
    },
    {
      "epoch": 0.7636363636363637,
      "grad_norm": 0.4484409689903259,
      "learning_rate": 4.997050398198977e-06,
      "loss": 0.6031,
      "step": 112
    },
    {
      "epoch": 0.7704545454545455,
      "grad_norm": 0.42671382427215576,
      "learning_rate": 4.996538432757414e-06,
      "loss": 0.6106,
      "step": 113
    },
    {
      "epoch": 0.7772727272727272,
      "grad_norm": 0.4222075343132019,
      "learning_rate": 4.995985549356568e-06,
      "loss": 0.5774,
      "step": 114
    },
    {
      "epoch": 0.7840909090909091,
      "grad_norm": 0.421021431684494,
      "learning_rate": 4.995391757058129e-06,
      "loss": 0.5754,
      "step": 115
    },
    {
      "epoch": 0.7909090909090909,
      "grad_norm": 0.4215952754020691,
      "learning_rate": 4.99475706559428e-06,
      "loss": 0.5895,
      "step": 116
    },
    {
      "epoch": 0.7977272727272727,
      "grad_norm": 0.42652982473373413,
      "learning_rate": 4.994081485367537e-06,
      "loss": 0.5646,
      "step": 117
    },
    {
      "epoch": 0.8045454545454546,
      "grad_norm": 0.4145742654800415,
      "learning_rate": 4.993365027450576e-06,
      "loss": 0.5862,
      "step": 118
    },
    {
      "epoch": 0.8113636363636364,
      "grad_norm": 0.4754612445831299,
      "learning_rate": 4.992607703586058e-06,
      "loss": 0.5645,
      "step": 119
    },
    {
      "epoch": 0.8181818181818182,
      "grad_norm": 0.4694959223270416,
      "learning_rate": 4.991809526186424e-06,
      "loss": 0.5989,
      "step": 120
    },
    {
      "epoch": 0.825,
      "grad_norm": 0.4573237895965576,
      "learning_rate": 4.990970508333707e-06,
      "loss": 0.5769,
      "step": 121
    },
    {
      "epoch": 0.8318181818181818,
      "grad_norm": 0.4592607915401459,
      "learning_rate": 4.990090663779305e-06,
      "loss": 0.5529,
      "step": 122
    },
    {
      "epoch": 0.8386363636363636,
      "grad_norm": 0.4325920045375824,
      "learning_rate": 4.9891700069437635e-06,
      "loss": 0.5846,
      "step": 123
    },
    {
      "epoch": 0.8454545454545455,
      "grad_norm": 0.44623467326164246,
      "learning_rate": 4.988208552916535e-06,
      "loss": 0.5997,
      "step": 124
    },
    {
      "epoch": 0.8522727272727273,
      "grad_norm": 0.46071872115135193,
      "learning_rate": 4.987206317455734e-06,
      "loss": 0.5687,
      "step": 125
    },
    {
      "epoch": 0.8590909090909091,
      "grad_norm": 0.43121206760406494,
      "learning_rate": 4.986163316987877e-06,
      "loss": 0.5277,
      "step": 126
    },
    {
      "epoch": 0.865909090909091,
      "grad_norm": 0.4711630046367645,
      "learning_rate": 4.985079568607613e-06,
      "loss": 0.592,
      "step": 127
    },
    {
      "epoch": 0.8727272727272727,
      "grad_norm": 0.4450221657752991,
      "learning_rate": 4.983955090077445e-06,
      "loss": 0.5656,
      "step": 128
    },
    {
      "epoch": 0.8795454545454545,
      "grad_norm": 0.6546564102172852,
      "learning_rate": 4.982789899827439e-06,
      "loss": 0.572,
      "step": 129
    },
    {
      "epoch": 0.8863636363636364,
      "grad_norm": 0.49622273445129395,
      "learning_rate": 4.9815840169549216e-06,
      "loss": 0.5719,
      "step": 130
    },
    {
      "epoch": 0.8931818181818182,
      "grad_norm": 0.5478544235229492,
      "learning_rate": 4.980337461224164e-06,
      "loss": 0.5774,
      "step": 131
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.4340018033981323,
      "learning_rate": 4.979050253066064e-06,
      "loss": 0.5444,
      "step": 132
    },
    {
      "epoch": 0.9068181818181819,
      "grad_norm": 0.4243793189525604,
      "learning_rate": 4.977722413577802e-06,
      "loss": 0.5759,
      "step": 133
    },
    {
      "epoch": 0.9136363636363637,
      "grad_norm": 0.45201942324638367,
      "learning_rate": 4.976353964522509e-06,
      "loss": 0.5746,
      "step": 134
    },
    {
      "epoch": 0.9204545454545454,
      "grad_norm": 0.42892220616340637,
      "learning_rate": 4.974944928328894e-06,
      "loss": 0.544,
      "step": 135
    },
    {
      "epoch": 0.9272727272727272,
      "grad_norm": 0.43185824155807495,
      "learning_rate": 4.973495328090891e-06,
      "loss": 0.5803,
      "step": 136
    },
    {
      "epoch": 0.9340909090909091,
      "grad_norm": 0.5512542724609375,
      "learning_rate": 4.972005187567267e-06,
      "loss": 0.5694,
      "step": 137
    },
    {
      "epoch": 0.9409090909090909,
      "grad_norm": 0.4836059808731079,
      "learning_rate": 4.970474531181245e-06,
      "loss": 0.5581,
      "step": 138
    },
    {
      "epoch": 0.9477272727272728,
      "grad_norm": 0.4508282244205475,
      "learning_rate": 4.968903384020095e-06,
      "loss": 0.5553,
      "step": 139
    },
    {
      "epoch": 0.9545454545454546,
      "grad_norm": 0.4764733612537384,
      "learning_rate": 4.967291771834727e-06,
      "loss": 0.5798,
      "step": 140
    },
    {
      "epoch": 0.9613636363636363,
      "grad_norm": 0.463468074798584,
      "learning_rate": 4.965639721039267e-06,
      "loss": 0.559,
      "step": 141
    },
    {
      "epoch": 0.9681818181818181,
      "grad_norm": 0.4655650854110718,
      "learning_rate": 4.963947258710626e-06,
      "loss": 0.5576,
      "step": 142
    },
    {
      "epoch": 0.975,
      "grad_norm": 0.4651234745979309,
      "learning_rate": 4.962214412588053e-06,
      "loss": 0.5792,
      "step": 143
    },
    {
      "epoch": 0.9818181818181818,
      "grad_norm": 0.43080538511276245,
      "learning_rate": 4.960441211072686e-06,
      "loss": 0.5314,
      "step": 144
    },
    {
      "epoch": 0.9886363636363636,
      "grad_norm": 0.4403238594532013,
      "learning_rate": 4.9586276832270785e-06,
      "loss": 0.5592,
      "step": 145
    },
    {
      "epoch": 0.9954545454545455,
      "grad_norm": 0.43968647718429565,
      "learning_rate": 4.9567738587747314e-06,
      "loss": 0.5573,
      "step": 146
    }
  ],
  "logging_steps": 1,
  "max_steps": 876,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 146,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.39244756481029e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}