Note: this file (a Hugging Face `Trainer` state log, conventionally `trainer_state.json`) is not strictly valid JSON. Strict parsers such as JavaScript's `JSON.parse` reject it with `Unexpected token 'N'` because several `"grad_norm"` entries contain the bare literal `NaN`, which the JSON specification does not allow. The raw contents are reproduced below.
| { | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 1.9152, | |
| "eval_steps": 500, | |
| "global_step": 300, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.0064, | |
| "grad_norm": 2.0177674293518066, | |
| "learning_rate": 0.001, | |
| "loss": 7.5774, | |
| "mean_token_accuracy": 0.5871803313493729, | |
| "step": 1 | |
| }, | |
| { | |
| "epoch": 0.0128, | |
| "grad_norm": 2.9027979373931885, | |
| "learning_rate": 0.001, | |
| "loss": 8.8307, | |
| "mean_token_accuracy": 0.5205357447266579, | |
| "step": 2 | |
| }, | |
| { | |
| "epoch": 0.0192, | |
| "grad_norm": 20.20247459411621, | |
| "learning_rate": 0.001, | |
| "loss": 10.8145, | |
| "mean_token_accuracy": 0.46558595448732376, | |
| "step": 3 | |
| }, | |
| { | |
| "epoch": 0.0256, | |
| "grad_norm": NaN, | |
| "learning_rate": 0.001, | |
| "loss": 12.6533, | |
| "mean_token_accuracy": 0.4072755202651024, | |
| "step": 4 | |
| }, | |
| { | |
| "epoch": 0.032, | |
| "grad_norm": NaN, | |
| "learning_rate": 0.001, | |
| "loss": 13.6561, | |
| "mean_token_accuracy": 0.37730202078819275, | |
| "step": 5 | |
| }, | |
| { | |
| "epoch": 0.0384, | |
| "grad_norm": 43.733421325683594, | |
| "learning_rate": 0.001, | |
| "loss": 12.4744, | |
| "mean_token_accuracy": 0.40100549906492233, | |
| "step": 6 | |
| }, | |
| { | |
| "epoch": 0.0448, | |
| "grad_norm": 19.141199111938477, | |
| "learning_rate": 0.001, | |
| "loss": 9.5344, | |
| "mean_token_accuracy": 0.5192811861634254, | |
| "step": 7 | |
| }, | |
| { | |
| "epoch": 0.0512, | |
| "grad_norm": 18.41023826599121, | |
| "learning_rate": 0.001, | |
| "loss": 14.3608, | |
| "mean_token_accuracy": 0.43483711034059525, | |
| "step": 8 | |
| }, | |
| { | |
| "epoch": 0.0576, | |
| "grad_norm": 43.658721923828125, | |
| "learning_rate": 0.001, | |
| "loss": 13.3078, | |
| "mean_token_accuracy": 0.41958054155111313, | |
| "step": 9 | |
| }, | |
| { | |
| "epoch": 0.064, | |
| "grad_norm": 8.837448120117188, | |
| "learning_rate": 0.001, | |
| "loss": 10.5372, | |
| "mean_token_accuracy": 0.47571663558483124, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.0704, | |
| "grad_norm": 10.096052169799805, | |
| "learning_rate": 0.001, | |
| "loss": 12.1162, | |
| "mean_token_accuracy": 0.4255446493625641, | |
| "step": 11 | |
| }, | |
| { | |
| "epoch": 0.0768, | |
| "grad_norm": 5.100619316101074, | |
| "learning_rate": 0.001, | |
| "loss": 9.2243, | |
| "mean_token_accuracy": 0.5292309895157814, | |
| "step": 12 | |
| }, | |
| { | |
| "epoch": 0.0832, | |
| "grad_norm": 4.010665416717529, | |
| "learning_rate": 0.001, | |
| "loss": 6.7791, | |
| "mean_token_accuracy": 0.6034770011901855, | |
| "step": 13 | |
| }, | |
| { | |
| "epoch": 0.0896, | |
| "grad_norm": 7.71261739730835, | |
| "learning_rate": 0.001, | |
| "loss": 8.4717, | |
| "mean_token_accuracy": 0.519000381231308, | |
| "step": 14 | |
| }, | |
| { | |
| "epoch": 0.096, | |
| "grad_norm": 6.087244987487793, | |
| "learning_rate": 0.001, | |
| "loss": 8.6032, | |
| "mean_token_accuracy": 0.5275572314858437, | |
| "step": 15 | |
| }, | |
| { | |
| "epoch": 0.1024, | |
| "grad_norm": 3.9907681941986084, | |
| "learning_rate": 0.001, | |
| "loss": 7.6546, | |
| "mean_token_accuracy": 0.542157307267189, | |
| "step": 16 | |
| }, | |
| { | |
| "epoch": 0.1088, | |
| "grad_norm": 4.168624401092529, | |
| "learning_rate": 0.001, | |
| "loss": 7.6228, | |
| "mean_token_accuracy": 0.5621763542294502, | |
| "step": 17 | |
| }, | |
| { | |
| "epoch": 0.1152, | |
| "grad_norm": 4.9508490562438965, | |
| "learning_rate": 0.001, | |
| "loss": 9.808, | |
| "mean_token_accuracy": 0.49128710478544235, | |
| "step": 18 | |
| }, | |
| { | |
| "epoch": 0.1216, | |
| "grad_norm": 4.520578861236572, | |
| "learning_rate": 0.001, | |
| "loss": 8.8476, | |
| "mean_token_accuracy": 0.4943431541323662, | |
| "step": 19 | |
| }, | |
| { | |
| "epoch": 0.128, | |
| "grad_norm": 6.03663444519043, | |
| "learning_rate": 0.001, | |
| "loss": 8.0179, | |
| "mean_token_accuracy": 0.5464938580989838, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.1344, | |
| "grad_norm": 4.153735637664795, | |
| "learning_rate": 0.001, | |
| "loss": 8.7043, | |
| "mean_token_accuracy": 0.5475813895463943, | |
| "step": 21 | |
| }, | |
| { | |
| "epoch": 0.1408, | |
| "grad_norm": 3.6078875064849854, | |
| "learning_rate": 0.001, | |
| "loss": 8.5597, | |
| "mean_token_accuracy": 0.5447330102324486, | |
| "step": 22 | |
| }, | |
| { | |
| "epoch": 0.1472, | |
| "grad_norm": 3.8478240966796875, | |
| "learning_rate": 0.001, | |
| "loss": 7.2146, | |
| "mean_token_accuracy": 0.5992539674043655, | |
| "step": 23 | |
| }, | |
| { | |
| "epoch": 0.1536, | |
| "grad_norm": 4.033797264099121, | |
| "learning_rate": 0.001, | |
| "loss": 9.3191, | |
| "mean_token_accuracy": 0.5196451023221016, | |
| "step": 24 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "grad_norm": 3.2458839416503906, | |
| "learning_rate": 0.001, | |
| "loss": 7.4417, | |
| "mean_token_accuracy": 0.5597032383084297, | |
| "step": 25 | |
| }, | |
| { | |
| "epoch": 0.1664, | |
| "grad_norm": 3.520216226577759, | |
| "learning_rate": 0.001, | |
| "loss": 6.7451, | |
| "mean_token_accuracy": 0.5860115587711334, | |
| "step": 26 | |
| }, | |
| { | |
| "epoch": 0.1728, | |
| "grad_norm": 3.7371273040771484, | |
| "learning_rate": 0.001, | |
| "loss": 7.8899, | |
| "mean_token_accuracy": 0.5652190744876862, | |
| "step": 27 | |
| }, | |
| { | |
| "epoch": 0.1792, | |
| "grad_norm": 4.060925006866455, | |
| "learning_rate": 0.001, | |
| "loss": 7.4009, | |
| "mean_token_accuracy": 0.5925501734018326, | |
| "step": 28 | |
| }, | |
| { | |
| "epoch": 0.1856, | |
| "grad_norm": 3.9062178134918213, | |
| "learning_rate": 0.001, | |
| "loss": 7.3145, | |
| "mean_token_accuracy": 0.5906639099121094, | |
| "step": 29 | |
| }, | |
| { | |
| "epoch": 0.192, | |
| "grad_norm": 3.7855491638183594, | |
| "learning_rate": 0.001, | |
| "loss": 7.9822, | |
| "mean_token_accuracy": 0.5379130840301514, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.1984, | |
| "grad_norm": 3.340970516204834, | |
| "learning_rate": 0.001, | |
| "loss": 7.1766, | |
| "mean_token_accuracy": 0.5813343971967697, | |
| "step": 31 | |
| }, | |
| { | |
| "epoch": 0.2048, | |
| "grad_norm": 3.692194938659668, | |
| "learning_rate": 0.001, | |
| "loss": 7.6622, | |
| "mean_token_accuracy": 0.5705007463693619, | |
| "step": 32 | |
| }, | |
| { | |
| "epoch": 0.2112, | |
| "grad_norm": 3.3015754222869873, | |
| "learning_rate": 0.001, | |
| "loss": 7.1023, | |
| "mean_token_accuracy": 0.5972073376178741, | |
| "step": 33 | |
| }, | |
| { | |
| "epoch": 0.2176, | |
| "grad_norm": 3.191516160964966, | |
| "learning_rate": 0.001, | |
| "loss": 7.8106, | |
| "mean_token_accuracy": 0.577879324555397, | |
| "step": 34 | |
| }, | |
| { | |
| "epoch": 0.224, | |
| "grad_norm": 3.015953779220581, | |
| "learning_rate": 0.001, | |
| "loss": 8.2667, | |
| "mean_token_accuracy": 0.5295384302735329, | |
| "step": 35 | |
| }, | |
| { | |
| "epoch": 0.2304, | |
| "grad_norm": 3.0506298542022705, | |
| "learning_rate": 0.001, | |
| "loss": 8.2168, | |
| "mean_token_accuracy": 0.5699651688337326, | |
| "step": 36 | |
| }, | |
| { | |
| "epoch": 0.2368, | |
| "grad_norm": 3.11960506439209, | |
| "learning_rate": 0.001, | |
| "loss": 7.697, | |
| "mean_token_accuracy": 0.5628390982747078, | |
| "step": 37 | |
| }, | |
| { | |
| "epoch": 0.2432, | |
| "grad_norm": 3.0201468467712402, | |
| "learning_rate": 0.001, | |
| "loss": 7.6835, | |
| "mean_token_accuracy": 0.5989912003278732, | |
| "step": 38 | |
| }, | |
| { | |
| "epoch": 0.2496, | |
| "grad_norm": 3.462604522705078, | |
| "learning_rate": 0.001, | |
| "loss": 7.8899, | |
| "mean_token_accuracy": 0.5595831423997879, | |
| "step": 39 | |
| }, | |
| { | |
| "epoch": 0.256, | |
| "grad_norm": 3.401752233505249, | |
| "learning_rate": 0.001, | |
| "loss": 7.9503, | |
| "mean_token_accuracy": 0.5780320316553116, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.2624, | |
| "grad_norm": 4.185473442077637, | |
| "learning_rate": 0.001, | |
| "loss": 7.799, | |
| "mean_token_accuracy": 0.5548681393265724, | |
| "step": 41 | |
| }, | |
| { | |
| "epoch": 0.2688, | |
| "grad_norm": 3.9332938194274902, | |
| "learning_rate": 0.001, | |
| "loss": 7.7152, | |
| "mean_token_accuracy": 0.5636641383171082, | |
| "step": 42 | |
| }, | |
| { | |
| "epoch": 0.2752, | |
| "grad_norm": 3.2568907737731934, | |
| "learning_rate": 0.001, | |
| "loss": 7.4339, | |
| "mean_token_accuracy": 0.5839549452066422, | |
| "step": 43 | |
| }, | |
| { | |
| "epoch": 0.2816, | |
| "grad_norm": 4.672369956970215, | |
| "learning_rate": 0.001, | |
| "loss": 9.5802, | |
| "mean_token_accuracy": 0.5100178346037865, | |
| "step": 44 | |
| }, | |
| { | |
| "epoch": 0.288, | |
| "grad_norm": 2.7107110023498535, | |
| "learning_rate": 0.001, | |
| "loss": 6.8152, | |
| "mean_token_accuracy": 0.6055551916360855, | |
| "step": 45 | |
| }, | |
| { | |
| "epoch": 0.2944, | |
| "grad_norm": 4.762430667877197, | |
| "learning_rate": 0.001, | |
| "loss": 9.5548, | |
| "mean_token_accuracy": 0.5057197883725166, | |
| "step": 46 | |
| }, | |
| { | |
| "epoch": 0.3008, | |
| "grad_norm": 3.7424633502960205, | |
| "learning_rate": 0.001, | |
| "loss": 8.5479, | |
| "mean_token_accuracy": 0.5115512683987617, | |
| "step": 47 | |
| }, | |
| { | |
| "epoch": 0.3072, | |
| "grad_norm": 3.2961721420288086, | |
| "learning_rate": 0.001, | |
| "loss": 8.2113, | |
| "mean_token_accuracy": 0.557342454791069, | |
| "step": 48 | |
| }, | |
| { | |
| "epoch": 0.3136, | |
| "grad_norm": 3.239579677581787, | |
| "learning_rate": 0.001, | |
| "loss": 8.8304, | |
| "mean_token_accuracy": 0.5229588970541954, | |
| "step": 49 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "grad_norm": 3.273914337158203, | |
| "learning_rate": 0.001, | |
| "loss": 7.7483, | |
| "mean_token_accuracy": 0.5654094517230988, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.3264, | |
| "grad_norm": 3.6419663429260254, | |
| "learning_rate": 0.001, | |
| "loss": 7.3461, | |
| "mean_token_accuracy": 0.6018975228071213, | |
| "step": 51 | |
| }, | |
| { | |
| "epoch": 0.3328, | |
| "grad_norm": 3.4327375888824463, | |
| "learning_rate": 0.001, | |
| "loss": 7.6107, | |
| "mean_token_accuracy": 0.5776806026697159, | |
| "step": 52 | |
| }, | |
| { | |
| "epoch": 0.3392, | |
| "grad_norm": 2.8938372135162354, | |
| "learning_rate": 0.001, | |
| "loss": 7.7927, | |
| "mean_token_accuracy": 0.5748834684491158, | |
| "step": 53 | |
| }, | |
| { | |
| "epoch": 0.3456, | |
| "grad_norm": 3.780987024307251, | |
| "learning_rate": 0.001, | |
| "loss": 7.7148, | |
| "mean_token_accuracy": 0.5800818204879761, | |
| "step": 54 | |
| }, | |
| { | |
| "epoch": 0.352, | |
| "grad_norm": 7.018247604370117, | |
| "learning_rate": 0.001, | |
| "loss": 7.9333, | |
| "mean_token_accuracy": 0.557623416185379, | |
| "step": 55 | |
| }, | |
| { | |
| "epoch": 0.3584, | |
| "grad_norm": 4.366345405578613, | |
| "learning_rate": 0.001, | |
| "loss": 8.7168, | |
| "mean_token_accuracy": 0.5106043368577957, | |
| "step": 56 | |
| }, | |
| { | |
| "epoch": 0.3648, | |
| "grad_norm": 3.5044593811035156, | |
| "learning_rate": 0.001, | |
| "loss": 7.3359, | |
| "mean_token_accuracy": 0.5744027197360992, | |
| "step": 57 | |
| }, | |
| { | |
| "epoch": 0.3712, | |
| "grad_norm": 3.710293769836426, | |
| "learning_rate": 0.001, | |
| "loss": 7.1611, | |
| "mean_token_accuracy": 0.6022080183029175, | |
| "step": 58 | |
| }, | |
| { | |
| "epoch": 0.3776, | |
| "grad_norm": 3.784938097000122, | |
| "learning_rate": 0.001, | |
| "loss": 7.0631, | |
| "mean_token_accuracy": 0.5821704566478729, | |
| "step": 59 | |
| }, | |
| { | |
| "epoch": 0.384, | |
| "grad_norm": 3.79630708694458, | |
| "learning_rate": 0.001, | |
| "loss": 6.6392, | |
| "mean_token_accuracy": 0.6132841110229492, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.3904, | |
| "grad_norm": 3.004554510116577, | |
| "learning_rate": 0.001, | |
| "loss": 7.1997, | |
| "mean_token_accuracy": 0.5954362899065018, | |
| "step": 61 | |
| }, | |
| { | |
| "epoch": 0.3968, | |
| "grad_norm": 4.015055179595947, | |
| "learning_rate": 0.001, | |
| "loss": 7.4768, | |
| "mean_token_accuracy": 0.5736093372106552, | |
| "step": 62 | |
| }, | |
| { | |
| "epoch": 0.4032, | |
| "grad_norm": 3.654245615005493, | |
| "learning_rate": 0.001, | |
| "loss": 7.8212, | |
| "mean_token_accuracy": 0.5659709572792053, | |
| "step": 63 | |
| }, | |
| { | |
| "epoch": 0.4096, | |
| "grad_norm": 4.0627641677856445, | |
| "learning_rate": 0.001, | |
| "loss": 9.2678, | |
| "mean_token_accuracy": 0.5201347693800926, | |
| "step": 64 | |
| }, | |
| { | |
| "epoch": 0.416, | |
| "grad_norm": 3.4690985679626465, | |
| "learning_rate": 0.001, | |
| "loss": 8.8517, | |
| "mean_token_accuracy": 0.5343466401100159, | |
| "step": 65 | |
| }, | |
| { | |
| "epoch": 0.4224, | |
| "grad_norm": 3.752857208251953, | |
| "learning_rate": 0.001, | |
| "loss": 8.8329, | |
| "mean_token_accuracy": 0.5228310152888298, | |
| "step": 66 | |
| }, | |
| { | |
| "epoch": 0.4288, | |
| "grad_norm": 2.968993902206421, | |
| "learning_rate": 0.001, | |
| "loss": 7.6058, | |
| "mean_token_accuracy": 0.5344163477420807, | |
| "step": 67 | |
| }, | |
| { | |
| "epoch": 0.4352, | |
| "grad_norm": 3.72929310798645, | |
| "learning_rate": 0.001, | |
| "loss": 9.3222, | |
| "mean_token_accuracy": 0.516074076294899, | |
| "step": 68 | |
| }, | |
| { | |
| "epoch": 0.4416, | |
| "grad_norm": 3.6030168533325195, | |
| "learning_rate": 0.001, | |
| "loss": 8.5513, | |
| "mean_token_accuracy": 0.5421805381774902, | |
| "step": 69 | |
| }, | |
| { | |
| "epoch": 0.448, | |
| "grad_norm": 3.1739273071289062, | |
| "learning_rate": 0.001, | |
| "loss": 7.0802, | |
| "mean_token_accuracy": 0.5998114049434662, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.4544, | |
| "grad_norm": 2.880340099334717, | |
| "learning_rate": 0.001, | |
| "loss": 6.7046, | |
| "mean_token_accuracy": 0.6100719571113586, | |
| "step": 71 | |
| }, | |
| { | |
| "epoch": 0.4608, | |
| "grad_norm": 3.2981016635894775, | |
| "learning_rate": 0.001, | |
| "loss": 7.9998, | |
| "mean_token_accuracy": 0.5657886415719986, | |
| "step": 72 | |
| }, | |
| { | |
| "epoch": 0.4672, | |
| "grad_norm": 4.456881999969482, | |
| "learning_rate": 0.001, | |
| "loss": 7.1571, | |
| "mean_token_accuracy": 0.5802148729562759, | |
| "step": 73 | |
| }, | |
| { | |
| "epoch": 0.4736, | |
| "grad_norm": 4.069066047668457, | |
| "learning_rate": 0.001, | |
| "loss": 8.8054, | |
| "mean_token_accuracy": 0.5348180085420609, | |
| "step": 74 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "grad_norm": 9.803370475769043, | |
| "learning_rate": 0.001, | |
| "loss": 8.5662, | |
| "mean_token_accuracy": 0.5325330048799515, | |
| "step": 75 | |
| }, | |
| { | |
| "epoch": 0.4864, | |
| "grad_norm": 4.328041076660156, | |
| "learning_rate": 0.001, | |
| "loss": 8.1187, | |
| "mean_token_accuracy": 0.5573177188634872, | |
| "step": 76 | |
| }, | |
| { | |
| "epoch": 0.4928, | |
| "grad_norm": 5.3780975341796875, | |
| "learning_rate": 0.001, | |
| "loss": 8.1414, | |
| "mean_token_accuracy": 0.5307755619287491, | |
| "step": 77 | |
| }, | |
| { | |
| "epoch": 0.4992, | |
| "grad_norm": 4.601280212402344, | |
| "learning_rate": 0.001, | |
| "loss": 8.1343, | |
| "mean_token_accuracy": 0.5730244666337967, | |
| "step": 78 | |
| }, | |
| { | |
| "epoch": 0.5056, | |
| "grad_norm": 5.185049533843994, | |
| "learning_rate": 0.001, | |
| "loss": 10.3809, | |
| "mean_token_accuracy": 0.45638662576675415, | |
| "step": 79 | |
| }, | |
| { | |
| "epoch": 0.512, | |
| "grad_norm": 4.1511054039001465, | |
| "learning_rate": 0.001, | |
| "loss": 7.903, | |
| "mean_token_accuracy": 0.5588131695985794, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.5184, | |
| "grad_norm": 3.8643414974212646, | |
| "learning_rate": 0.001, | |
| "loss": 9.1992, | |
| "mean_token_accuracy": 0.526216022670269, | |
| "step": 81 | |
| }, | |
| { | |
| "epoch": 0.5248, | |
| "grad_norm": 3.9921276569366455, | |
| "learning_rate": 0.001, | |
| "loss": 7.9791, | |
| "mean_token_accuracy": 0.5335483700037003, | |
| "step": 82 | |
| }, | |
| { | |
| "epoch": 0.5312, | |
| "grad_norm": 3.2328104972839355, | |
| "learning_rate": 0.001, | |
| "loss": 8.3164, | |
| "mean_token_accuracy": 0.5265945047140121, | |
| "step": 83 | |
| }, | |
| { | |
| "epoch": 0.5376, | |
| "grad_norm": 4.729416370391846, | |
| "learning_rate": 0.001, | |
| "loss": 8.9703, | |
| "mean_token_accuracy": 0.5294319912791252, | |
| "step": 84 | |
| }, | |
| { | |
| "epoch": 0.544, | |
| "grad_norm": 3.562079906463623, | |
| "learning_rate": 0.001, | |
| "loss": 8.2047, | |
| "mean_token_accuracy": 0.557565338909626, | |
| "step": 85 | |
| }, | |
| { | |
| "epoch": 0.5504, | |
| "grad_norm": 3.8010246753692627, | |
| "learning_rate": 0.001, | |
| "loss": 8.0639, | |
| "mean_token_accuracy": 0.5428211987018585, | |
| "step": 86 | |
| }, | |
| { | |
| "epoch": 0.5568, | |
| "grad_norm": 3.3087170124053955, | |
| "learning_rate": 0.001, | |
| "loss": 8.145, | |
| "mean_token_accuracy": 0.5458494797348976, | |
| "step": 87 | |
| }, | |
| { | |
| "epoch": 0.5632, | |
| "grad_norm": 3.956712245941162, | |
| "learning_rate": 0.001, | |
| "loss": 8.6958, | |
| "mean_token_accuracy": 0.5472606420516968, | |
| "step": 88 | |
| }, | |
| { | |
| "epoch": 0.5696, | |
| "grad_norm": 3.548285722732544, | |
| "learning_rate": 0.001, | |
| "loss": 7.0706, | |
| "mean_token_accuracy": 0.605229914188385, | |
| "step": 89 | |
| }, | |
| { | |
| "epoch": 0.576, | |
| "grad_norm": 3.6893436908721924, | |
| "learning_rate": 0.001, | |
| "loss": 6.2735, | |
| "mean_token_accuracy": 0.6206476241350174, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.5824, | |
| "grad_norm": 8.876976013183594, | |
| "learning_rate": 0.001, | |
| "loss": 9.1893, | |
| "mean_token_accuracy": 0.5234974399209023, | |
| "step": 91 | |
| }, | |
| { | |
| "epoch": 0.5888, | |
| "grad_norm": 4.515777587890625, | |
| "learning_rate": 0.001, | |
| "loss": 7.1549, | |
| "mean_token_accuracy": 0.5581575930118561, | |
| "step": 92 | |
| }, | |
| { | |
| "epoch": 0.5952, | |
| "grad_norm": 3.814516067504883, | |
| "learning_rate": 0.001, | |
| "loss": 7.7558, | |
| "mean_token_accuracy": 0.572398416697979, | |
| "step": 93 | |
| }, | |
| { | |
| "epoch": 0.6016, | |
| "grad_norm": 4.637458801269531, | |
| "learning_rate": 0.001, | |
| "loss": 8.2918, | |
| "mean_token_accuracy": 0.5480805337429047, | |
| "step": 94 | |
| }, | |
| { | |
| "epoch": 0.608, | |
| "grad_norm": 3.827664613723755, | |
| "learning_rate": 0.001, | |
| "loss": 7.6153, | |
| "mean_token_accuracy": 0.5540450513362885, | |
| "step": 95 | |
| }, | |
| { | |
| "epoch": 0.6144, | |
| "grad_norm": 3.596336841583252, | |
| "learning_rate": 0.001, | |
| "loss": 8.1713, | |
| "mean_token_accuracy": 0.5356313809752464, | |
| "step": 96 | |
| }, | |
| { | |
| "epoch": 0.6208, | |
| "grad_norm": 8.448469161987305, | |
| "learning_rate": 0.001, | |
| "loss": 8.668, | |
| "mean_token_accuracy": 0.5483140274882317, | |
| "step": 97 | |
| }, | |
| { | |
| "epoch": 0.6272, | |
| "grad_norm": 5.3135576248168945, | |
| "learning_rate": 0.001, | |
| "loss": 7.7838, | |
| "mean_token_accuracy": 0.5676964446902275, | |
| "step": 98 | |
| }, | |
| { | |
| "epoch": 0.6336, | |
| "grad_norm": 4.343587398529053, | |
| "learning_rate": 0.001, | |
| "loss": 7.2509, | |
| "mean_token_accuracy": 0.5730399489402771, | |
| "step": 99 | |
| }, | |
| { | |
| "epoch": 0.64, | |
| "grad_norm": 3.066798686981201, | |
| "learning_rate": 0.001, | |
| "loss": 7.0591, | |
| "mean_token_accuracy": 0.6044407039880753, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.6464, | |
| "grad_norm": 4.594414234161377, | |
| "learning_rate": 0.001, | |
| "loss": 7.8146, | |
| "mean_token_accuracy": 0.5625345483422279, | |
| "step": 101 | |
| }, | |
| { | |
| "epoch": 0.6528, | |
| "grad_norm": 7.372500419616699, | |
| "learning_rate": 0.001, | |
| "loss": 7.9078, | |
| "mean_token_accuracy": 0.5716247260570526, | |
| "step": 102 | |
| }, | |
| { | |
| "epoch": 0.6592, | |
| "grad_norm": 4.589946269989014, | |
| "learning_rate": 0.001, | |
| "loss": 7.0762, | |
| "mean_token_accuracy": 0.5866946280002594, | |
| "step": 103 | |
| }, | |
| { | |
| "epoch": 0.6656, | |
| "grad_norm": 8.939701080322266, | |
| "learning_rate": 0.001, | |
| "loss": 8.9255, | |
| "mean_token_accuracy": 0.528026632964611, | |
| "step": 104 | |
| }, | |
| { | |
| "epoch": 0.672, | |
| "grad_norm": 4.3290534019470215, | |
| "learning_rate": 0.001, | |
| "loss": 8.1217, | |
| "mean_token_accuracy": 0.5670952945947647, | |
| "step": 105 | |
| }, | |
| { | |
| "epoch": 0.6784, | |
| "grad_norm": 3.7743752002716064, | |
| "learning_rate": 0.001, | |
| "loss": 7.4548, | |
| "mean_token_accuracy": 0.5619177967309952, | |
| "step": 106 | |
| }, | |
| { | |
| "epoch": 0.6848, | |
| "grad_norm": 4.942656993865967, | |
| "learning_rate": 0.001, | |
| "loss": 8.1344, | |
| "mean_token_accuracy": 0.565968431532383, | |
| "step": 107 | |
| }, | |
| { | |
| "epoch": 0.6912, | |
| "grad_norm": 4.2500715255737305, | |
| "learning_rate": 0.001, | |
| "loss": 7.5658, | |
| "mean_token_accuracy": 0.5492838993668556, | |
| "step": 108 | |
| }, | |
| { | |
| "epoch": 0.6976, | |
| "grad_norm": 3.6989309787750244, | |
| "learning_rate": 0.001, | |
| "loss": 7.7848, | |
| "mean_token_accuracy": 0.5549194514751434, | |
| "step": 109 | |
| }, | |
| { | |
| "epoch": 0.704, | |
| "grad_norm": 4.687801837921143, | |
| "learning_rate": 0.001, | |
| "loss": 8.1714, | |
| "mean_token_accuracy": 0.5541109219193459, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.7104, | |
| "grad_norm": 3.4812569618225098, | |
| "learning_rate": 0.001, | |
| "loss": 9.1957, | |
| "mean_token_accuracy": 0.5032462030649185, | |
| "step": 111 | |
| }, | |
| { | |
| "epoch": 0.7168, | |
| "grad_norm": 4.627752780914307, | |
| "learning_rate": 0.001, | |
| "loss": 9.3127, | |
| "mean_token_accuracy": 0.5073471590876579, | |
| "step": 112 | |
| }, | |
| { | |
| "epoch": 0.7232, | |
| "grad_norm": 3.9376790523529053, | |
| "learning_rate": 0.001, | |
| "loss": 8.562, | |
| "mean_token_accuracy": 0.5392563939094543, | |
| "step": 113 | |
| }, | |
| { | |
| "epoch": 0.7296, | |
| "grad_norm": 3.162787675857544, | |
| "learning_rate": 0.001, | |
| "loss": 7.3301, | |
| "mean_token_accuracy": 0.5914620906114578, | |
| "step": 114 | |
| }, | |
| { | |
| "epoch": 0.736, | |
| "grad_norm": 4.065870761871338, | |
| "learning_rate": 0.001, | |
| "loss": 7.881, | |
| "mean_token_accuracy": 0.5708122700452805, | |
| "step": 115 | |
| }, | |
| { | |
| "epoch": 0.7424, | |
| "grad_norm": 4.9694318771362305, | |
| "learning_rate": 0.001, | |
| "loss": 7.6188, | |
| "mean_token_accuracy": 0.5728907287120819, | |
| "step": 116 | |
| }, | |
| { | |
| "epoch": 0.7488, | |
| "grad_norm": 3.600388526916504, | |
| "learning_rate": 0.001, | |
| "loss": 6.7958, | |
| "mean_token_accuracy": 0.6140304803848267, | |
| "step": 117 | |
| }, | |
| { | |
| "epoch": 0.7552, | |
| "grad_norm": 4.772489070892334, | |
| "learning_rate": 0.001, | |
| "loss": 7.8477, | |
| "mean_token_accuracy": 0.5720326602458954, | |
| "step": 118 | |
| }, | |
| { | |
| "epoch": 0.7616, | |
| "grad_norm": 4.109669208526611, | |
| "learning_rate": 0.001, | |
| "loss": 7.8857, | |
| "mean_token_accuracy": 0.5725124627351761, | |
| "step": 119 | |
| }, | |
| { | |
| "epoch": 0.768, | |
| "grad_norm": 3.9959371089935303, | |
| "learning_rate": 0.001, | |
| "loss": 7.8924, | |
| "mean_token_accuracy": 0.5729866176843643, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.7744, | |
| "grad_norm": 4.218033790588379, | |
| "learning_rate": 0.001, | |
| "loss": 8.5045, | |
| "mean_token_accuracy": 0.5448136478662491, | |
| "step": 121 | |
| }, | |
| { | |
| "epoch": 0.7808, | |
| "grad_norm": 10.680689811706543, | |
| "learning_rate": 0.001, | |
| "loss": 8.5677, | |
| "mean_token_accuracy": 0.5437836945056915, | |
| "step": 122 | |
| }, | |
| { | |
| "epoch": 0.7872, | |
| "grad_norm": 6.1264729499816895, | |
| "learning_rate": 0.001, | |
| "loss": 10.1013, | |
| "mean_token_accuracy": 0.4813794791698456, | |
| "step": 123 | |
| }, | |
| { | |
| "epoch": 0.7936, | |
| "grad_norm": 3.4104678630828857, | |
| "learning_rate": 0.001, | |
| "loss": 7.5739, | |
| "mean_token_accuracy": 0.5576579570770264, | |
| "step": 124 | |
| }, | |
| { | |
| "epoch": 0.8, | |
| "grad_norm": 4.8629255294799805, | |
| "learning_rate": 0.001, | |
| "loss": 8.2406, | |
| "mean_token_accuracy": 0.5386597216129303, | |
| "step": 125 | |
| }, | |
| { | |
| "epoch": 0.8064, | |
| "grad_norm": 4.2899627685546875, | |
| "learning_rate": 0.001, | |
| "loss": 8.4591, | |
| "mean_token_accuracy": 0.5415358692407608, | |
| "step": 126 | |
| }, | |
| { | |
| "epoch": 0.8128, | |
| "grad_norm": 3.7481234073638916, | |
| "learning_rate": 0.001, | |
| "loss": 7.0793, | |
| "mean_token_accuracy": 0.6038873642683029, | |
| "step": 127 | |
| }, | |
| { | |
| "epoch": 0.8192, | |
| "grad_norm": 3.3412671089172363, | |
| "learning_rate": 0.001, | |
| "loss": 6.6192, | |
| "mean_token_accuracy": 0.6217593550682068, | |
| "step": 128 | |
| }, | |
| { | |
| "epoch": 0.8256, | |
| "grad_norm": 4.920543670654297, | |
| "learning_rate": 0.001, | |
| "loss": 8.4534, | |
| "mean_token_accuracy": 0.5469236895442009, | |
| "step": 129 | |
| }, | |
| { | |
| "epoch": 0.832, | |
| "grad_norm": 3.746335029602051, | |
| "learning_rate": 0.001, | |
| "loss": 8.2707, | |
| "mean_token_accuracy": 0.5559498444199562, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.8384, | |
| "grad_norm": 3.816923141479492, | |
| "learning_rate": 0.001, | |
| "loss": 7.3012, | |
| "mean_token_accuracy": 0.5551306903362274, | |
| "step": 131 | |
| }, | |
| { | |
| "epoch": 0.8448, | |
| "grad_norm": 5.7253265380859375, | |
| "learning_rate": 0.001, | |
| "loss": 7.7264, | |
| "mean_token_accuracy": 0.5539910942316055, | |
| "step": 132 | |
| }, | |
| { | |
| "epoch": 0.8512, | |
| "grad_norm": 9.863517761230469, | |
| "learning_rate": 0.001, | |
| "loss": 8.4424, | |
| "mean_token_accuracy": 0.551963597536087, | |
| "step": 133 | |
| }, | |
| { | |
| "epoch": 0.8576, | |
| "grad_norm": 5.7852582931518555, | |
| "learning_rate": 0.001, | |
| "loss": 8.8394, | |
| "mean_token_accuracy": 0.503598153591156, | |
| "step": 134 | |
| }, | |
| { | |
| "epoch": 0.864, | |
| "grad_norm": 4.214358806610107, | |
| "learning_rate": 0.001, | |
| "loss": 8.0501, | |
| "mean_token_accuracy": 0.578537181019783, | |
| "step": 135 | |
| }, | |
| { | |
| "epoch": 0.8704, | |
| "grad_norm": 4.677027225494385, | |
| "learning_rate": 0.001, | |
| "loss": 8.2782, | |
| "mean_token_accuracy": 0.556526243686676, | |
| "step": 136 | |
| }, | |
| { | |
| "epoch": 0.8768, | |
| "grad_norm": 4.3732590675354, | |
| "learning_rate": 0.001, | |
| "loss": 7.1854, | |
| "mean_token_accuracy": 0.5627769976854324, | |
| "step": 137 | |
| }, | |
| { | |
| "epoch": 0.8832, | |
| "grad_norm": 4.285269260406494, | |
| "learning_rate": 0.001, | |
| "loss": 7.5983, | |
| "mean_token_accuracy": 0.5761594474315643, | |
| "step": 138 | |
| }, | |
| { | |
| "epoch": 0.8896, | |
| "grad_norm": 4.189344882965088, | |
| "learning_rate": 0.001, | |
| "loss": 7.796, | |
| "mean_token_accuracy": 0.5710103362798691, | |
| "step": 139 | |
| }, | |
| { | |
| "epoch": 0.896, | |
| "grad_norm": 5.359838008880615, | |
| "learning_rate": 0.001, | |
| "loss": 9.3741, | |
| "mean_token_accuracy": 0.522984579205513, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.9024, | |
| "grad_norm": 4.133533000946045, | |
| "learning_rate": 0.001, | |
| "loss": 8.7714, | |
| "mean_token_accuracy": 0.49868279695510864, | |
| "step": 141 | |
| }, | |
| { | |
| "epoch": 0.9088, | |
| "grad_norm": 3.432337522506714, | |
| "learning_rate": 0.001, | |
| "loss": 7.1933, | |
| "mean_token_accuracy": 0.5911879688501358, | |
| "step": 142 | |
| }, | |
| { | |
| "epoch": 0.9152, | |
| "grad_norm": 3.4508094787597656, | |
| "learning_rate": 0.001, | |
| "loss": 8.4234, | |
| "mean_token_accuracy": 0.544566810131073, | |
| "step": 143 | |
| }, | |
| { | |
| "epoch": 0.9216, | |
| "grad_norm": 4.863613605499268, | |
| "learning_rate": 0.001, | |
| "loss": 8.2328, | |
| "mean_token_accuracy": 0.5418634340167046, | |
| "step": 144 | |
| }, | |
| { | |
| "epoch": 0.928, | |
| "grad_norm": 3.8848912715911865, | |
| "learning_rate": 0.001, | |
| "loss": 8.2236, | |
| "mean_token_accuracy": 0.5698046088218689, | |
| "step": 145 | |
| }, | |
| { | |
| "epoch": 0.9344, | |
| "grad_norm": NaN, | |
| "learning_rate": 0.001, | |
| "loss": 8.5748, | |
| "mean_token_accuracy": 0.5264320075511932, | |
| "step": 146 | |
| }, | |
| { | |
| "epoch": 0.9408, | |
| "grad_norm": 4.209794998168945, | |
| "learning_rate": 0.001, | |
| "loss": 8.6393, | |
| "mean_token_accuracy": 0.5443025454878807, | |
| "step": 147 | |
| }, | |
| { | |
| "epoch": 0.9472, | |
| "grad_norm": 5.81450891494751, | |
| "learning_rate": 0.001, | |
| "loss": 8.0271, | |
| "mean_token_accuracy": 0.5531649142503738, | |
| "step": 148 | |
| }, | |
| { | |
| "epoch": 0.9536, | |
| "grad_norm": 3.847665786743164, | |
| "learning_rate": 0.001, | |
| "loss": 7.4199, | |
| "mean_token_accuracy": 0.5528692901134491, | |
| "step": 149 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "grad_norm": 4.067963123321533, | |
| "learning_rate": 0.001, | |
| "loss": 6.8948, | |
| "mean_token_accuracy": 0.6041955649852753, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.9664, | |
| "grad_norm": 3.6745011806488037, | |
| "learning_rate": 0.001, | |
| "loss": 7.916, | |
| "mean_token_accuracy": 0.5488733500242233, | |
| "step": 151 | |
| }, | |
| { | |
| "epoch": 0.9728, | |
| "grad_norm": 3.931896686553955, | |
| "learning_rate": 0.001, | |
| "loss": 7.7169, | |
| "mean_token_accuracy": 0.5718288719654083, | |
| "step": 152 | |
| }, | |
| { | |
| "epoch": 0.9792, | |
| "grad_norm": 4.551197052001953, | |
| "learning_rate": 0.001, | |
| "loss": 8.1617, | |
| "mean_token_accuracy": 0.5373415797948837, | |
| "step": 153 | |
| }, | |
| { | |
| "epoch": 0.9856, | |
| "grad_norm": 4.994777679443359, | |
| "learning_rate": 0.001, | |
| "loss": 6.9665, | |
| "mean_token_accuracy": 0.5668836236000061, | |
| "step": 154 | |
| }, | |
| { | |
| "epoch": 0.992, | |
| "grad_norm": 4.934599876403809, | |
| "learning_rate": 0.001, | |
| "loss": 8.9109, | |
| "mean_token_accuracy": 0.5285608246922493, | |
| "step": 155 | |
| }, | |
| { | |
| "epoch": 0.9984, | |
| "grad_norm": 4.362490653991699, | |
| "learning_rate": 0.001, | |
| "loss": 7.9886, | |
| "mean_token_accuracy": 0.5652178972959518, | |
| "step": 156 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 2.0191662311553955, | |
| "learning_rate": 0.001, | |
| "loss": 2.2094, | |
| "mean_token_accuracy": 0.49253731966018677, | |
| "step": 157 | |
| }, | |
| { | |
| "epoch": 1.0064, | |
| "grad_norm": 4.179709434509277, | |
| "learning_rate": 0.001, | |
| "loss": 6.0564, | |
| "mean_token_accuracy": 0.6441845297813416, | |
| "step": 158 | |
| }, | |
| { | |
| "epoch": 1.0128, | |
| "grad_norm": 4.1790971755981445, | |
| "learning_rate": 0.001, | |
| "loss": 7.223, | |
| "mean_token_accuracy": 0.5850173979997635, | |
| "step": 159 | |
| }, | |
| { | |
| "epoch": 1.0192, | |
| "grad_norm": 3.393885374069214, | |
| "learning_rate": 0.001, | |
| "loss": 5.8742, | |
| "mean_token_accuracy": 0.6555072665214539, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 1.0256, | |
| "grad_norm": 4.170093536376953, | |
| "learning_rate": 0.001, | |
| "loss": 6.4492, | |
| "mean_token_accuracy": 0.6223380863666534, | |
| "step": 161 | |
| }, | |
| { | |
| "epoch": 1.032, | |
| "grad_norm": 4.816079616546631, | |
| "learning_rate": 0.001, | |
| "loss": 8.458, | |
| "mean_token_accuracy": 0.5561687871813774, | |
| "step": 162 | |
| }, | |
| { | |
| "epoch": 1.0384, | |
| "grad_norm": 7.0680108070373535, | |
| "learning_rate": 0.001, | |
| "loss": 6.4478, | |
| "mean_token_accuracy": 0.6458811461925507, | |
| "step": 163 | |
| }, | |
| { | |
| "epoch": 1.0448, | |
| "grad_norm": 5.175418376922607, | |
| "learning_rate": 0.001, | |
| "loss": 8.6144, | |
| "mean_token_accuracy": 0.5237443894147873, | |
| "step": 164 | |
| }, | |
| { | |
| "epoch": 1.0512, | |
| "grad_norm": 3.455251693725586, | |
| "learning_rate": 0.001, | |
| "loss": 6.6271, | |
| "mean_token_accuracy": 0.6214667707681656, | |
| "step": 165 | |
| }, | |
| { | |
| "epoch": 1.0576, | |
| "grad_norm": 4.4586992263793945, | |
| "learning_rate": 0.001, | |
| "loss": 5.9971, | |
| "mean_token_accuracy": 0.654425784945488, | |
| "step": 166 | |
| }, | |
| { | |
| "epoch": 1.064, | |
| "grad_norm": 4.797828197479248, | |
| "learning_rate": 0.001, | |
| "loss": 7.2682, | |
| "mean_token_accuracy": 0.5919231176376343, | |
| "step": 167 | |
| }, | |
| { | |
| "epoch": 1.0704, | |
| "grad_norm": 4.61244010925293, | |
| "learning_rate": 0.001, | |
| "loss": 6.1862, | |
| "mean_token_accuracy": 0.6452524065971375, | |
| "step": 168 | |
| }, | |
| { | |
| "epoch": 1.0768, | |
| "grad_norm": 5.201780796051025, | |
| "learning_rate": 0.001, | |
| "loss": 9.1169, | |
| "mean_token_accuracy": 0.5475455224514008, | |
| "step": 169 | |
| }, | |
| { | |
| "epoch": 1.0832, | |
| "grad_norm": 5.166975975036621, | |
| "learning_rate": 0.001, | |
| "loss": 6.9593, | |
| "mean_token_accuracy": 0.5929352939128876, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 1.0896, | |
| "grad_norm": 5.824632167816162, | |
| "learning_rate": 0.001, | |
| "loss": 8.294, | |
| "mean_token_accuracy": 0.5294871926307678, | |
| "step": 171 | |
| }, | |
| { | |
| "epoch": 1.096, | |
| "grad_norm": 9.257010459899902, | |
| "learning_rate": 0.001, | |
| "loss": 7.0368, | |
| "mean_token_accuracy": 0.6035382002592087, | |
| "step": 172 | |
| }, | |
| { | |
| "epoch": 1.1024, | |
| "grad_norm": 5.524269104003906, | |
| "learning_rate": 0.001, | |
| "loss": 5.6529, | |
| "mean_token_accuracy": 0.6525093466043472, | |
| "step": 173 | |
| }, | |
| { | |
| "epoch": 1.1088, | |
| "grad_norm": 5.32748556137085, | |
| "learning_rate": 0.001, | |
| "loss": 6.7001, | |
| "mean_token_accuracy": 0.6103013008832932, | |
| "step": 174 | |
| }, | |
| { | |
| "epoch": 1.1152, | |
| "grad_norm": 5.178861141204834, | |
| "learning_rate": 0.001, | |
| "loss": 6.7637, | |
| "mean_token_accuracy": 0.6111182868480682, | |
| "step": 175 | |
| }, | |
| { | |
| "epoch": 1.1216, | |
| "grad_norm": 3.9681689739227295, | |
| "learning_rate": 0.001, | |
| "loss": 6.0235, | |
| "mean_token_accuracy": 0.6353351175785065, | |
| "step": 176 | |
| }, | |
| { | |
| "epoch": 1.1280000000000001, | |
| "grad_norm": 4.23559045791626, | |
| "learning_rate": 0.001, | |
| "loss": 6.2243, | |
| "mean_token_accuracy": 0.636825680732727, | |
| "step": 177 | |
| }, | |
| { | |
| "epoch": 1.1344, | |
| "grad_norm": 9.25437068939209, | |
| "learning_rate": 0.001, | |
| "loss": 7.3467, | |
| "mean_token_accuracy": 0.6091018617153168, | |
| "step": 178 | |
| }, | |
| { | |
| "epoch": 1.1408, | |
| "grad_norm": 6.518167018890381, | |
| "learning_rate": 0.001, | |
| "loss": 7.4647, | |
| "mean_token_accuracy": 0.5769879817962646, | |
| "step": 179 | |
| }, | |
| { | |
| "epoch": 1.1472, | |
| "grad_norm": 8.98256778717041, | |
| "learning_rate": 0.001, | |
| "loss": 8.0621, | |
| "mean_token_accuracy": 0.5615440681576729, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 1.1536, | |
| "grad_norm": 6.962597370147705, | |
| "learning_rate": 0.001, | |
| "loss": 7.0736, | |
| "mean_token_accuracy": 0.6006975322961807, | |
| "step": 181 | |
| }, | |
| { | |
| "epoch": 1.16, | |
| "grad_norm": 15.468770027160645, | |
| "learning_rate": 0.001, | |
| "loss": 7.8873, | |
| "mean_token_accuracy": 0.5628447085618973, | |
| "step": 182 | |
| }, | |
| { | |
| "epoch": 1.1663999999999999, | |
| "grad_norm": 6.4656596183776855, | |
| "learning_rate": 0.001, | |
| "loss": 7.7006, | |
| "mean_token_accuracy": 0.5857141762971878, | |
| "step": 183 | |
| }, | |
| { | |
| "epoch": 1.1728, | |
| "grad_norm": 6.264251708984375, | |
| "learning_rate": 0.001, | |
| "loss": 8.5055, | |
| "mean_token_accuracy": 0.5526725947856903, | |
| "step": 184 | |
| }, | |
| { | |
| "epoch": 1.1792, | |
| "grad_norm": 9.173776626586914, | |
| "learning_rate": 0.001, | |
| "loss": 8.3511, | |
| "mean_token_accuracy": 0.5329839885234833, | |
| "step": 185 | |
| }, | |
| { | |
| "epoch": 1.1856, | |
| "grad_norm": 6.497045993804932, | |
| "learning_rate": 0.001, | |
| "loss": 8.5874, | |
| "mean_token_accuracy": 0.5194432288408279, | |
| "step": 186 | |
| }, | |
| { | |
| "epoch": 1.192, | |
| "grad_norm": 6.55042839050293, | |
| "learning_rate": 0.001, | |
| "loss": 7.2464, | |
| "mean_token_accuracy": 0.5852644890546799, | |
| "step": 187 | |
| }, | |
| { | |
| "epoch": 1.1984, | |
| "grad_norm": 4.928357124328613, | |
| "learning_rate": 0.001, | |
| "loss": 7.421, | |
| "mean_token_accuracy": 0.575689509510994, | |
| "step": 188 | |
| }, | |
| { | |
| "epoch": 1.2048, | |
| "grad_norm": 6.555416107177734, | |
| "learning_rate": 0.001, | |
| "loss": 7.1958, | |
| "mean_token_accuracy": 0.6004299223423004, | |
| "step": 189 | |
| }, | |
| { | |
| "epoch": 1.2112, | |
| "grad_norm": 6.944830417633057, | |
| "learning_rate": 0.001, | |
| "loss": 6.8997, | |
| "mean_token_accuracy": 0.5822109580039978, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 1.2176, | |
| "grad_norm": 8.618417739868164, | |
| "learning_rate": 0.001, | |
| "loss": 8.1635, | |
| "mean_token_accuracy": 0.5582073777914047, | |
| "step": 191 | |
| }, | |
| { | |
| "epoch": 1.224, | |
| "grad_norm": 8.295172691345215, | |
| "learning_rate": 0.001, | |
| "loss": 7.5801, | |
| "mean_token_accuracy": 0.5919029116630554, | |
| "step": 192 | |
| }, | |
| { | |
| "epoch": 1.2304, | |
| "grad_norm": 12.239678382873535, | |
| "learning_rate": 0.001, | |
| "loss": 7.3438, | |
| "mean_token_accuracy": 0.5995124354958534, | |
| "step": 193 | |
| }, | |
| { | |
| "epoch": 1.2368000000000001, | |
| "grad_norm": 7.0297441482543945, | |
| "learning_rate": 0.001, | |
| "loss": 8.2068, | |
| "mean_token_accuracy": 0.537700891494751, | |
| "step": 194 | |
| }, | |
| { | |
| "epoch": 1.2432, | |
| "grad_norm": 5.0657639503479, | |
| "learning_rate": 0.001, | |
| "loss": 7.3465, | |
| "mean_token_accuracy": 0.587388165295124, | |
| "step": 195 | |
| }, | |
| { | |
| "epoch": 1.2496, | |
| "grad_norm": 5.155961990356445, | |
| "learning_rate": 0.001, | |
| "loss": 7.4988, | |
| "mean_token_accuracy": 0.6019035428762436, | |
| "step": 196 | |
| }, | |
| { | |
| "epoch": 1.256, | |
| "grad_norm": 3.893186569213867, | |
| "learning_rate": 0.001, | |
| "loss": 5.796, | |
| "mean_token_accuracy": 0.624050185084343, | |
| "step": 197 | |
| }, | |
| { | |
| "epoch": 1.2624, | |
| "grad_norm": 7.488790512084961, | |
| "learning_rate": 0.001, | |
| "loss": 8.4914, | |
| "mean_token_accuracy": 0.5403816178441048, | |
| "step": 198 | |
| }, | |
| { | |
| "epoch": 1.2688, | |
| "grad_norm": 10.15825080871582, | |
| "learning_rate": 0.001, | |
| "loss": 6.9975, | |
| "mean_token_accuracy": 0.5812895894050598, | |
| "step": 199 | |
| }, | |
| { | |
| "epoch": 1.2752, | |
| "grad_norm": 5.484641075134277, | |
| "learning_rate": 0.001, | |
| "loss": 6.5434, | |
| "mean_token_accuracy": 0.6408251523971558, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 1.2816, | |
| "grad_norm": 6.395635604858398, | |
| "learning_rate": 0.001, | |
| "loss": 7.7083, | |
| "mean_token_accuracy": 0.5760865062475204, | |
| "step": 201 | |
| }, | |
| { | |
| "epoch": 1.288, | |
| "grad_norm": 7.382782936096191, | |
| "learning_rate": 0.001, | |
| "loss": 7.147, | |
| "mean_token_accuracy": 0.5934395641088486, | |
| "step": 202 | |
| }, | |
| { | |
| "epoch": 1.2944, | |
| "grad_norm": 6.834714412689209, | |
| "learning_rate": 0.001, | |
| "loss": 7.6222, | |
| "mean_token_accuracy": 0.5739501044154167, | |
| "step": 203 | |
| }, | |
| { | |
| "epoch": 1.3008, | |
| "grad_norm": 5.695088863372803, | |
| "learning_rate": 0.001, | |
| "loss": 7.475, | |
| "mean_token_accuracy": 0.5647561475634575, | |
| "step": 204 | |
| }, | |
| { | |
| "epoch": 1.3072, | |
| "grad_norm": 5.658081531524658, | |
| "learning_rate": 0.001, | |
| "loss": 6.6691, | |
| "mean_token_accuracy": 0.6017946600914001, | |
| "step": 205 | |
| }, | |
| { | |
| "epoch": 1.3136, | |
| "grad_norm": 9.058781623840332, | |
| "learning_rate": 0.001, | |
| "loss": 6.3451, | |
| "mean_token_accuracy": 0.6354028433561325, | |
| "step": 206 | |
| }, | |
| { | |
| "epoch": 1.32, | |
| "grad_norm": 6.336489200592041, | |
| "learning_rate": 0.001, | |
| "loss": 7.9945, | |
| "mean_token_accuracy": 0.5321162715554237, | |
| "step": 207 | |
| }, | |
| { | |
| "epoch": 1.3264, | |
| "grad_norm": 14.819890022277832, | |
| "learning_rate": 0.001, | |
| "loss": 9.4337, | |
| "mean_token_accuracy": 0.5192963108420372, | |
| "step": 208 | |
| }, | |
| { | |
| "epoch": 1.3328, | |
| "grad_norm": 6.626960754394531, | |
| "learning_rate": 0.001, | |
| "loss": 7.0263, | |
| "mean_token_accuracy": 0.6068531572818756, | |
| "step": 209 | |
| }, | |
| { | |
| "epoch": 1.3392, | |
| "grad_norm": 7.5550384521484375, | |
| "learning_rate": 0.001, | |
| "loss": 7.5964, | |
| "mean_token_accuracy": 0.5845440551638603, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 1.3456000000000001, | |
| "grad_norm": 9.043644905090332, | |
| "learning_rate": 0.001, | |
| "loss": 8.9667, | |
| "mean_token_accuracy": 0.5143468156456947, | |
| "step": 211 | |
| }, | |
| { | |
| "epoch": 1.3519999999999999, | |
| "grad_norm": 15.724257469177246, | |
| "learning_rate": 0.001, | |
| "loss": 7.97, | |
| "mean_token_accuracy": 0.5721839368343353, | |
| "step": 212 | |
| }, | |
| { | |
| "epoch": 1.3584, | |
| "grad_norm": 12.023893356323242, | |
| "learning_rate": 0.001, | |
| "loss": 7.454, | |
| "mean_token_accuracy": 0.599329836666584, | |
| "step": 213 | |
| }, | |
| { | |
| "epoch": 1.3648, | |
| "grad_norm": 6.79151725769043, | |
| "learning_rate": 0.001, | |
| "loss": 7.7166, | |
| "mean_token_accuracy": 0.5553706511855125, | |
| "step": 214 | |
| }, | |
| { | |
| "epoch": 1.3712, | |
| "grad_norm": 6.837470054626465, | |
| "learning_rate": 0.001, | |
| "loss": 10.8317, | |
| "mean_token_accuracy": 0.4465588107705116, | |
| "step": 215 | |
| }, | |
| { | |
| "epoch": 1.3776, | |
| "grad_norm": 9.545637130737305, | |
| "learning_rate": 0.001, | |
| "loss": 6.7084, | |
| "mean_token_accuracy": 0.593764916062355, | |
| "step": 216 | |
| }, | |
| { | |
| "epoch": 1.384, | |
| "grad_norm": 5.631826877593994, | |
| "learning_rate": 0.001, | |
| "loss": 6.8109, | |
| "mean_token_accuracy": 0.5985722541809082, | |
| "step": 217 | |
| }, | |
| { | |
| "epoch": 1.3904, | |
| "grad_norm": 7.997573375701904, | |
| "learning_rate": 0.001, | |
| "loss": 7.6082, | |
| "mean_token_accuracy": 0.5720451474189758, | |
| "step": 218 | |
| }, | |
| { | |
| "epoch": 1.3968, | |
| "grad_norm": 10.023303985595703, | |
| "learning_rate": 0.001, | |
| "loss": 8.4064, | |
| "mean_token_accuracy": 0.5365717485547066, | |
| "step": 219 | |
| }, | |
| { | |
| "epoch": 1.4032, | |
| "grad_norm": 7.888483047485352, | |
| "learning_rate": 0.001, | |
| "loss": 8.3661, | |
| "mean_token_accuracy": 0.5367622226476669, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 1.4096, | |
| "grad_norm": 7.387235164642334, | |
| "learning_rate": 0.001, | |
| "loss": 7.0045, | |
| "mean_token_accuracy": 0.5931309461593628, | |
| "step": 221 | |
| }, | |
| { | |
| "epoch": 1.416, | |
| "grad_norm": 4.908820629119873, | |
| "learning_rate": 0.001, | |
| "loss": 7.457, | |
| "mean_token_accuracy": 0.5676573291420937, | |
| "step": 222 | |
| }, | |
| { | |
| "epoch": 1.4224, | |
| "grad_norm": 7.541878700256348, | |
| "learning_rate": 0.001, | |
| "loss": 8.5576, | |
| "mean_token_accuracy": 0.5507887750864029, | |
| "step": 223 | |
| }, | |
| { | |
| "epoch": 1.4288, | |
| "grad_norm": 6.35562801361084, | |
| "learning_rate": 0.001, | |
| "loss": 7.1046, | |
| "mean_token_accuracy": 0.5813699215650558, | |
| "step": 224 | |
| }, | |
| { | |
| "epoch": 1.4352, | |
| "grad_norm": 5.955767631530762, | |
| "learning_rate": 0.001, | |
| "loss": 7.5295, | |
| "mean_token_accuracy": 0.5491400882601738, | |
| "step": 225 | |
| }, | |
| { | |
| "epoch": 1.4416, | |
| "grad_norm": 10.310933113098145, | |
| "learning_rate": 0.001, | |
| "loss": 7.4046, | |
| "mean_token_accuracy": 0.5788294672966003, | |
| "step": 226 | |
| }, | |
| { | |
| "epoch": 1.448, | |
| "grad_norm": 8.590178489685059, | |
| "learning_rate": 0.001, | |
| "loss": 6.6688, | |
| "mean_token_accuracy": 0.5879671573638916, | |
| "step": 227 | |
| }, | |
| { | |
| "epoch": 1.4544000000000001, | |
| "grad_norm": 7.05693244934082, | |
| "learning_rate": 0.001, | |
| "loss": 9.785, | |
| "mean_token_accuracy": 0.4796893522143364, | |
| "step": 228 | |
| }, | |
| { | |
| "epoch": 1.4607999999999999, | |
| "grad_norm": 7.907512187957764, | |
| "learning_rate": 0.001, | |
| "loss": 6.369, | |
| "mean_token_accuracy": 0.6156620383262634, | |
| "step": 229 | |
| }, | |
| { | |
| "epoch": 1.4672, | |
| "grad_norm": 5.780457973480225, | |
| "learning_rate": 0.001, | |
| "loss": 7.7141, | |
| "mean_token_accuracy": 0.5557262748479843, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 1.4736, | |
| "grad_norm": 12.134108543395996, | |
| "learning_rate": 0.001, | |
| "loss": 8.9121, | |
| "mean_token_accuracy": 0.5156212523579597, | |
| "step": 231 | |
| }, | |
| { | |
| "epoch": 1.48, | |
| "grad_norm": 6.636194229125977, | |
| "learning_rate": 0.001, | |
| "loss": 7.12, | |
| "mean_token_accuracy": 0.6036765873432159, | |
| "step": 232 | |
| }, | |
| { | |
| "epoch": 1.4864, | |
| "grad_norm": 5.020055770874023, | |
| "learning_rate": 0.001, | |
| "loss": 6.3509, | |
| "mean_token_accuracy": 0.6170566231012344, | |
| "step": 233 | |
| }, | |
| { | |
| "epoch": 1.4928, | |
| "grad_norm": 10.214864730834961, | |
| "learning_rate": 0.001, | |
| "loss": 6.5362, | |
| "mean_token_accuracy": 0.6308254599571228, | |
| "step": 234 | |
| }, | |
| { | |
| "epoch": 1.4992, | |
| "grad_norm": 11.291118621826172, | |
| "learning_rate": 0.001, | |
| "loss": 8.2135, | |
| "mean_token_accuracy": 0.5482836216688156, | |
| "step": 235 | |
| }, | |
| { | |
| "epoch": 1.5056, | |
| "grad_norm": 10.337629318237305, | |
| "learning_rate": 0.001, | |
| "loss": 7.4203, | |
| "mean_token_accuracy": 0.5811362043023109, | |
| "step": 236 | |
| }, | |
| { | |
| "epoch": 1.512, | |
| "grad_norm": 8.213200569152832, | |
| "learning_rate": 0.001, | |
| "loss": 8.0874, | |
| "mean_token_accuracy": 0.5553788542747498, | |
| "step": 237 | |
| }, | |
| { | |
| "epoch": 1.5184, | |
| "grad_norm": 35.60185623168945, | |
| "learning_rate": 0.001, | |
| "loss": 8.3935, | |
| "mean_token_accuracy": 0.5098748654127121, | |
| "step": 238 | |
| }, | |
| { | |
| "epoch": 1.5248, | |
| "grad_norm": 10.800209045410156, | |
| "learning_rate": 0.001, | |
| "loss": 8.3645, | |
| "mean_token_accuracy": 0.5591541230678558, | |
| "step": 239 | |
| }, | |
| { | |
| "epoch": 1.5312000000000001, | |
| "grad_norm": 10.682641983032227, | |
| "learning_rate": 0.001, | |
| "loss": 8.0174, | |
| "mean_token_accuracy": 0.5395123660564423, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 1.5375999999999999, | |
| "grad_norm": 16.261472702026367, | |
| "learning_rate": 0.001, | |
| "loss": 8.4602, | |
| "mean_token_accuracy": 0.5196909606456757, | |
| "step": 241 | |
| }, | |
| { | |
| "epoch": 1.544, | |
| "grad_norm": 7.711510181427002, | |
| "learning_rate": 0.001, | |
| "loss": 7.8776, | |
| "mean_token_accuracy": 0.5560385212302208, | |
| "step": 242 | |
| }, | |
| { | |
| "epoch": 1.5504, | |
| "grad_norm": 10.960959434509277, | |
| "learning_rate": 0.001, | |
| "loss": 8.9444, | |
| "mean_token_accuracy": 0.5182236731052399, | |
| "step": 243 | |
| }, | |
| { | |
| "epoch": 1.5568, | |
| "grad_norm": 7.446345806121826, | |
| "learning_rate": 0.001, | |
| "loss": 6.9821, | |
| "mean_token_accuracy": 0.6031645685434341, | |
| "step": 244 | |
| }, | |
| { | |
| "epoch": 1.5632000000000001, | |
| "grad_norm": 8.444716453552246, | |
| "learning_rate": 0.001, | |
| "loss": 7.3512, | |
| "mean_token_accuracy": 0.5782665312290192, | |
| "step": 245 | |
| }, | |
| { | |
| "epoch": 1.5695999999999999, | |
| "grad_norm": 11.063419342041016, | |
| "learning_rate": 0.001, | |
| "loss": 8.5629, | |
| "mean_token_accuracy": 0.5185030549764633, | |
| "step": 246 | |
| }, | |
| { | |
| "epoch": 1.576, | |
| "grad_norm": 8.18127727508545, | |
| "learning_rate": 0.001, | |
| "loss": 8.4838, | |
| "mean_token_accuracy": 0.5051439926028252, | |
| "step": 247 | |
| }, | |
| { | |
| "epoch": 1.5824, | |
| "grad_norm": 9.148998260498047, | |
| "learning_rate": 0.001, | |
| "loss": 8.8174, | |
| "mean_token_accuracy": 0.515631377696991, | |
| "step": 248 | |
| }, | |
| { | |
| "epoch": 1.5888, | |
| "grad_norm": 7.322795867919922, | |
| "learning_rate": 0.001, | |
| "loss": 7.4007, | |
| "mean_token_accuracy": 0.5654035359621048, | |
| "step": 249 | |
| }, | |
| { | |
| "epoch": 1.5952, | |
| "grad_norm": 9.227678298950195, | |
| "learning_rate": 0.001, | |
| "loss": 9.3753, | |
| "mean_token_accuracy": 0.5103882774710655, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 1.6016, | |
| "grad_norm": 13.938738822937012, | |
| "learning_rate": 0.001, | |
| "loss": 7.1796, | |
| "mean_token_accuracy": 0.5777264684438705, | |
| "step": 251 | |
| }, | |
| { | |
| "epoch": 1.608, | |
| "grad_norm": 23.90799903869629, | |
| "learning_rate": 0.001, | |
| "loss": 7.985, | |
| "mean_token_accuracy": 0.5455044433474541, | |
| "step": 252 | |
| }, | |
| { | |
| "epoch": 1.6143999999999998, | |
| "grad_norm": 10.978887557983398, | |
| "learning_rate": 0.001, | |
| "loss": 7.6049, | |
| "mean_token_accuracy": 0.5702199786901474, | |
| "step": 253 | |
| }, | |
| { | |
| "epoch": 1.6208, | |
| "grad_norm": 8.08303165435791, | |
| "learning_rate": 0.001, | |
| "loss": 7.5714, | |
| "mean_token_accuracy": 0.5959209054708481, | |
| "step": 254 | |
| }, | |
| { | |
| "epoch": 1.6272, | |
| "grad_norm": 9.555181503295898, | |
| "learning_rate": 0.001, | |
| "loss": 7.6343, | |
| "mean_token_accuracy": 0.5681372657418251, | |
| "step": 255 | |
| }, | |
| { | |
| "epoch": 1.6336, | |
| "grad_norm": 8.243929862976074, | |
| "learning_rate": 0.001, | |
| "loss": 8.2715, | |
| "mean_token_accuracy": 0.5482152029871941, | |
| "step": 256 | |
| }, | |
| { | |
| "epoch": 1.6400000000000001, | |
| "grad_norm": 8.662081718444824, | |
| "learning_rate": 0.001, | |
| "loss": 7.8241, | |
| "mean_token_accuracy": 0.5627938807010651, | |
| "step": 257 | |
| }, | |
| { | |
| "epoch": 1.6463999999999999, | |
| "grad_norm": 8.32907772064209, | |
| "learning_rate": 0.001, | |
| "loss": 7.335, | |
| "mean_token_accuracy": 0.5363176316022873, | |
| "step": 258 | |
| }, | |
| { | |
| "epoch": 1.6528, | |
| "grad_norm": 11.057466506958008, | |
| "learning_rate": 0.001, | |
| "loss": 9.5491, | |
| "mean_token_accuracy": 0.5060033053159714, | |
| "step": 259 | |
| }, | |
| { | |
| "epoch": 1.6592, | |
| "grad_norm": 83.32975006103516, | |
| "learning_rate": 0.001, | |
| "loss": 9.0589, | |
| "mean_token_accuracy": 0.50930255651474, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 1.6656, | |
| "grad_norm": 8.165995597839355, | |
| "learning_rate": 0.001, | |
| "loss": 7.9928, | |
| "mean_token_accuracy": 0.563197672367096, | |
| "step": 261 | |
| }, | |
| { | |
| "epoch": 1.6720000000000002, | |
| "grad_norm": 16.992876052856445, | |
| "learning_rate": 0.001, | |
| "loss": 9.8182, | |
| "mean_token_accuracy": 0.48126567155122757, | |
| "step": 262 | |
| }, | |
| { | |
| "epoch": 1.6784, | |
| "grad_norm": 8.095207214355469, | |
| "learning_rate": 0.001, | |
| "loss": 9.419, | |
| "mean_token_accuracy": 0.500243678689003, | |
| "step": 263 | |
| }, | |
| { | |
| "epoch": 1.6848, | |
| "grad_norm": 16.054994583129883, | |
| "learning_rate": 0.001, | |
| "loss": 8.7803, | |
| "mean_token_accuracy": 0.5126448944211006, | |
| "step": 264 | |
| }, | |
| { | |
| "epoch": 1.6912, | |
| "grad_norm": 16.552730560302734, | |
| "learning_rate": 0.001, | |
| "loss": 8.9847, | |
| "mean_token_accuracy": 0.5172990635037422, | |
| "step": 265 | |
| }, | |
| { | |
| "epoch": 1.6976, | |
| "grad_norm": 13.06109619140625, | |
| "learning_rate": 0.001, | |
| "loss": 10.1881, | |
| "mean_token_accuracy": 0.49380549043416977, | |
| "step": 266 | |
| }, | |
| { | |
| "epoch": 1.704, | |
| "grad_norm": 10.187677383422852, | |
| "learning_rate": 0.001, | |
| "loss": 8.4531, | |
| "mean_token_accuracy": 0.5305923670530319, | |
| "step": 267 | |
| }, | |
| { | |
| "epoch": 1.7104, | |
| "grad_norm": 8.00294017791748, | |
| "learning_rate": 0.001, | |
| "loss": 8.55, | |
| "mean_token_accuracy": 0.5253442898392677, | |
| "step": 268 | |
| }, | |
| { | |
| "epoch": 1.7168, | |
| "grad_norm": 8.169543266296387, | |
| "learning_rate": 0.001, | |
| "loss": 10.1152, | |
| "mean_token_accuracy": 0.4511877968907356, | |
| "step": 269 | |
| }, | |
| { | |
| "epoch": 1.7231999999999998, | |
| "grad_norm": 8.316620826721191, | |
| "learning_rate": 0.001, | |
| "loss": 7.8197, | |
| "mean_token_accuracy": 0.5667182505130768, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 1.7296, | |
| "grad_norm": 7.95322847366333, | |
| "learning_rate": 0.001, | |
| "loss": 8.585, | |
| "mean_token_accuracy": 0.5186630189418793, | |
| "step": 271 | |
| }, | |
| { | |
| "epoch": 1.736, | |
| "grad_norm": 10.643982887268066, | |
| "learning_rate": 0.001, | |
| "loss": 8.1634, | |
| "mean_token_accuracy": 0.5534348115324974, | |
| "step": 272 | |
| }, | |
| { | |
| "epoch": 1.7424, | |
| "grad_norm": 11.432665824890137, | |
| "learning_rate": 0.001, | |
| "loss": 9.9191, | |
| "mean_token_accuracy": 0.4892067015171051, | |
| "step": 273 | |
| }, | |
| { | |
| "epoch": 1.7488000000000001, | |
| "grad_norm": 12.11987590789795, | |
| "learning_rate": 0.001, | |
| "loss": 7.9962, | |
| "mean_token_accuracy": 0.5544887632131577, | |
| "step": 274 | |
| }, | |
| { | |
| "epoch": 1.7551999999999999, | |
| "grad_norm": 16.141345977783203, | |
| "learning_rate": 0.001, | |
| "loss": 9.2237, | |
| "mean_token_accuracy": 0.5020880922675133, | |
| "step": 275 | |
| }, | |
| { | |
| "epoch": 1.7616, | |
| "grad_norm": 11.97499942779541, | |
| "learning_rate": 0.001, | |
| "loss": 9.4026, | |
| "mean_token_accuracy": 0.4923320710659027, | |
| "step": 276 | |
| }, | |
| { | |
| "epoch": 1.768, | |
| "grad_norm": 20.49795913696289, | |
| "learning_rate": 0.001, | |
| "loss": 8.6141, | |
| "mean_token_accuracy": 0.5215461701154709, | |
| "step": 277 | |
| }, | |
| { | |
| "epoch": 1.7744, | |
| "grad_norm": 20.775779724121094, | |
| "learning_rate": 0.001, | |
| "loss": 11.6113, | |
| "mean_token_accuracy": 0.4201728478074074, | |
| "step": 278 | |
| }, | |
| { | |
| "epoch": 1.7808000000000002, | |
| "grad_norm": 8.977204322814941, | |
| "learning_rate": 0.001, | |
| "loss": 8.8081, | |
| "mean_token_accuracy": 0.49830809235572815, | |
| "step": 279 | |
| }, | |
| { | |
| "epoch": 1.7872, | |
| "grad_norm": 11.948820114135742, | |
| "learning_rate": 0.001, | |
| "loss": 9.8821, | |
| "mean_token_accuracy": 0.49480871111154556, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 1.7936, | |
| "grad_norm": 10.269512176513672, | |
| "learning_rate": 0.001, | |
| "loss": 8.2235, | |
| "mean_token_accuracy": 0.5259484350681305, | |
| "step": 281 | |
| }, | |
| { | |
| "epoch": 1.8, | |
| "grad_norm": 5.860231876373291, | |
| "learning_rate": 0.001, | |
| "loss": 8.3539, | |
| "mean_token_accuracy": 0.5288261473178864, | |
| "step": 282 | |
| }, | |
| { | |
| "epoch": 1.8064, | |
| "grad_norm": 9.967249870300293, | |
| "learning_rate": 0.001, | |
| "loss": 9.6113, | |
| "mean_token_accuracy": 0.45666269958019257, | |
| "step": 283 | |
| }, | |
| { | |
| "epoch": 1.8128, | |
| "grad_norm": 10.237951278686523, | |
| "learning_rate": 0.001, | |
| "loss": 7.8902, | |
| "mean_token_accuracy": 0.5173894092440605, | |
| "step": 284 | |
| }, | |
| { | |
| "epoch": 1.8192, | |
| "grad_norm": 14.716999053955078, | |
| "learning_rate": 0.001, | |
| "loss": 9.5343, | |
| "mean_token_accuracy": 0.5037772506475449, | |
| "step": 285 | |
| }, | |
| { | |
| "epoch": 1.8256000000000001, | |
| "grad_norm": 10.649497032165527, | |
| "learning_rate": 0.001, | |
| "loss": 9.8439, | |
| "mean_token_accuracy": 0.48058757930994034, | |
| "step": 286 | |
| }, | |
| { | |
| "epoch": 1.8319999999999999, | |
| "grad_norm": 15.107233047485352, | |
| "learning_rate": 0.001, | |
| "loss": 8.1109, | |
| "mean_token_accuracy": 0.5341241806745529, | |
| "step": 287 | |
| }, | |
| { | |
| "epoch": 1.8384, | |
| "grad_norm": 12.771748542785645, | |
| "learning_rate": 0.001, | |
| "loss": 9.2594, | |
| "mean_token_accuracy": 0.48890865594148636, | |
| "step": 288 | |
| }, | |
| { | |
| "epoch": 1.8448, | |
| "grad_norm": 9.82298469543457, | |
| "learning_rate": 0.001, | |
| "loss": 8.4437, | |
| "mean_token_accuracy": 0.5308887958526611, | |
| "step": 289 | |
| }, | |
| { | |
| "epoch": 1.8512, | |
| "grad_norm": 22.638954162597656, | |
| "learning_rate": 0.001, | |
| "loss": 8.8215, | |
| "mean_token_accuracy": 0.5280868858098984, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 1.8576000000000001, | |
| "grad_norm": 10.145191192626953, | |
| "learning_rate": 0.001, | |
| "loss": 9.1644, | |
| "mean_token_accuracy": 0.5151188969612122, | |
| "step": 291 | |
| }, | |
| { | |
| "epoch": 1.8639999999999999, | |
| "grad_norm": 7.520384311676025, | |
| "learning_rate": 0.001, | |
| "loss": 8.3904, | |
| "mean_token_accuracy": 0.5109719932079315, | |
| "step": 292 | |
| }, | |
| { | |
| "epoch": 1.8704, | |
| "grad_norm": 16.988628387451172, | |
| "learning_rate": 0.001, | |
| "loss": 8.5715, | |
| "mean_token_accuracy": 0.5133798718452454, | |
| "step": 293 | |
| }, | |
| { | |
| "epoch": 1.8768, | |
| "grad_norm": 10.056252479553223, | |
| "learning_rate": 0.001, | |
| "loss": 8.0868, | |
| "mean_token_accuracy": 0.544953741133213, | |
| "step": 294 | |
| }, | |
| { | |
| "epoch": 1.8832, | |
| "grad_norm": 27.852039337158203, | |
| "learning_rate": 0.001, | |
| "loss": 6.7302, | |
| "mean_token_accuracy": 0.594126746058464, | |
| "step": 295 | |
| }, | |
| { | |
| "epoch": 1.8896, | |
| "grad_norm": 12.71786880493164, | |
| "learning_rate": 0.001, | |
| "loss": 9.0788, | |
| "mean_token_accuracy": 0.5247168093919754, | |
| "step": 296 | |
| }, | |
| { | |
| "epoch": 1.896, | |
| "grad_norm": 15.346375465393066, | |
| "learning_rate": 0.001, | |
| "loss": 8.3889, | |
| "mean_token_accuracy": 0.5260215997695923, | |
| "step": 297 | |
| }, | |
| { | |
| "epoch": 1.9024, | |
| "grad_norm": 23.141864776611328, | |
| "learning_rate": 0.001, | |
| "loss": 8.985, | |
| "mean_token_accuracy": 0.5044504255056381, | |
| "step": 298 | |
| }, | |
| { | |
| "epoch": 1.9088, | |
| "grad_norm": 13.86075210571289, | |
| "learning_rate": 0.001, | |
| "loss": 9.9779, | |
| "mean_token_accuracy": 0.4863489866256714, | |
| "step": 299 | |
| }, | |
| { | |
| "epoch": 1.9152, | |
| "grad_norm": 12.302156448364258, | |
| "learning_rate": 0.001, | |
| "loss": 10.3654, | |
| "mean_token_accuracy": 0.4447909966111183, | |
| "step": 300 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 500, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 4, | |
| "save_steps": 20, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.069164560285696e+16, | |
| "train_batch_size": 8, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
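If the state needs to pass a strict JSON parser (the error reported at the top of this file), one option is to rewrite it with every `NaN` replaced by `null`. A small sketch under that assumption; the destination filename is only illustrative:

```python
import json
import math

# Illustrative filenames; point SRC at the real checkpoint file.
SRC = "trainer_state.json"
DST = "trainer_state.strict.json"

def nan_to_none(obj):
    """Recursively replace float NaN with None so the output is strict JSON."""
    if isinstance(obj, float) and math.isnan(obj):
        return None
    if isinstance(obj, dict):
        return {k: nan_to_none(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [nan_to_none(v) for v in obj]
    return obj

with open(SRC) as f:
    state = json.load(f)  # tolerates the non-standard NaN literals on read

with open(DST, "w") as f:
    # allow_nan=False guarantees the rewritten file is specification-valid JSON.
    json.dump(nan_to_none(state), f, indent=2, allow_nan=False)
```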