{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9954545454545455,
  "eval_steps": 500,
  "global_step": 146,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006818181818181818,
      "grad_norm": 1.8943578004837036,
      "learning_rate": 5.0000000000000004e-08,
      "loss": 0.8676,
      "step": 1
    },
    {
      "epoch": 0.013636363636363636,
      "grad_norm": 7.775813102722168,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 0.8513,
      "step": 2
    },
    {
      "epoch": 0.020454545454545454,
      "grad_norm": 1.768803596496582,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 0.8339,
      "step": 3
    },
    {
      "epoch": 0.02727272727272727,
      "grad_norm": 1.7032263278961182,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 0.8348,
      "step": 4
    },
    {
      "epoch": 0.03409090909090909,
      "grad_norm": 1.8244796991348267,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 0.8687,
      "step": 5
    },
    {
      "epoch": 0.04090909090909091,
      "grad_norm": 1.7780181169509888,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 0.8113,
      "step": 6
    },
    {
      "epoch": 0.04772727272727273,
      "grad_norm": 1.8362191915512085,
      "learning_rate": 3.5000000000000004e-07,
      "loss": 0.8463,
      "step": 7
    },
    {
      "epoch": 0.05454545454545454,
      "grad_norm": 1.6640979051589966,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 0.8336,
      "step": 8
    },
    {
      "epoch": 0.06136363636363636,
      "grad_norm": 1.8013464212417603,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 0.8572,
      "step": 9
    },
    {
      "epoch": 0.06818181818181818,
      "grad_norm": 1.7237999439239502,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.8501,
      "step": 10
    },
    {
      "epoch": 0.075,
      "grad_norm": 1.989346981048584,
      "learning_rate": 5.5e-07,
      "loss": 0.8615,
      "step": 11
    },
    {
      "epoch": 0.08181818181818182,
      "grad_norm": 2.0312082767486572,
      "learning_rate": 6.000000000000001e-07,
      "loss": 0.858,
      "step": 12
    },
    {
      "epoch": 0.08863636363636364,
      "grad_norm": 1.6921299695968628,
      "learning_rate": 6.5e-07,
      "loss": 0.8577,
      "step": 13
    },
    {
      "epoch": 0.09545454545454546,
      "grad_norm": 2.4207160472869873,
      "learning_rate": 7.000000000000001e-07,
      "loss": 0.8564,
      "step": 14
    },
    {
      "epoch": 0.10227272727272728,
      "grad_norm": 1.7572585344314575,
      "learning_rate": 7.5e-07,
      "loss": 0.8409,
      "step": 15
    },
    {
      "epoch": 0.10909090909090909,
      "grad_norm": 1.6135950088500977,
      "learning_rate": 8.000000000000001e-07,
      "loss": 0.8086,
      "step": 16
    },
    {
      "epoch": 0.1159090909090909,
      "grad_norm": 2.226036787033081,
      "learning_rate": 8.500000000000001e-07,
      "loss": 0.8106,
      "step": 17
    },
    {
      "epoch": 0.12272727272727273,
      "grad_norm": 1.7164231538772583,
      "learning_rate": 9.000000000000001e-07,
      "loss": 0.8494,
      "step": 18
    },
    {
      "epoch": 0.12954545454545455,
      "grad_norm": 1.6572000980377197,
      "learning_rate": 9.500000000000001e-07,
      "loss": 0.8023,
      "step": 19
    },
    {
      "epoch": 0.13636363636363635,
      "grad_norm": 1.5672118663787842,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.787,
      "step": 20
    },
    {
      "epoch": 0.1431818181818182,
      "grad_norm": 1.5080257654190063,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 0.8105,
      "step": 21
    },
    {
      "epoch": 0.15,
      "grad_norm": 1.484892725944519,
      "learning_rate": 1.1e-06,
      "loss": 0.8258,
      "step": 22
    },
    {
      "epoch": 0.15681818181818183,
      "grad_norm": 1.4591134786605835,
      "learning_rate": 1.1500000000000002e-06,
      "loss": 0.8032,
      "step": 23
    },
    {
      "epoch": 0.16363636363636364,
      "grad_norm": 1.5506278276443481,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 0.8236,
      "step": 24
    },
    {
      "epoch": 0.17045454545454544,
      "grad_norm": 1.639350414276123,
      "learning_rate": 1.25e-06,
      "loss": 0.8185,
      "step": 25
    },
    {
      "epoch": 0.17727272727272728,
      "grad_norm": 1.592640995979309,
      "learning_rate": 1.3e-06,
      "loss": 0.7898,
      "step": 26
    },
    {
      "epoch": 0.18409090909090908,
      "grad_norm": 1.5280641317367554,
      "learning_rate": 1.3500000000000002e-06,
      "loss": 0.7731,
      "step": 27
    },
    {
      "epoch": 0.19090909090909092,
      "grad_norm": 1.4015443325042725,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.7956,
      "step": 28
    },
    {
      "epoch": 0.19772727272727272,
      "grad_norm": 1.4890056848526,
      "learning_rate": 1.45e-06,
      "loss": 0.7838,
      "step": 29
    },
    {
      "epoch": 0.20454545454545456,
      "grad_norm": 2.4770750999450684,
      "learning_rate": 1.5e-06,
      "loss": 0.7766,
      "step": 30
    },
    {
      "epoch": 0.21136363636363636,
      "grad_norm": 1.5863685607910156,
      "learning_rate": 1.5500000000000002e-06,
      "loss": 0.7353,
      "step": 31
    },
    {
      "epoch": 0.21818181818181817,
      "grad_norm": 1.058114767074585,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.7237,
      "step": 32
    },
    {
      "epoch": 0.225,
      "grad_norm": 1.5869101285934448,
      "learning_rate": 1.6500000000000003e-06,
      "loss": 0.7597,
      "step": 33
    },
    {
      "epoch": 0.2318181818181818,
      "grad_norm": 1.0012413263320923,
      "learning_rate": 1.7000000000000002e-06,
      "loss": 0.7388,
      "step": 34
    },
    {
      "epoch": 0.23863636363636365,
      "grad_norm": 1.118056058883667,
      "learning_rate": 1.75e-06,
      "loss": 0.7786,
      "step": 35
    },
    {
      "epoch": 0.24545454545454545,
      "grad_norm": 1.0648820400238037,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 0.7502,
      "step": 36
    },
    {
      "epoch": 0.25227272727272726,
      "grad_norm": 0.9440382719039917,
      "learning_rate": 1.85e-06,
      "loss": 0.6942,
      "step": 37
    },
    {
      "epoch": 0.2590909090909091,
      "grad_norm": 0.8300187587738037,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.7312,
      "step": 38
    },
    {
      "epoch": 0.26590909090909093,
      "grad_norm": 0.8296191692352295,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 0.7461,
      "step": 39
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 0.7797508239746094,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.7234,
      "step": 40
    },
    {
      "epoch": 0.27954545454545454,
      "grad_norm": 0.7335019707679749,
      "learning_rate": 2.05e-06,
      "loss": 0.6927,
      "step": 41
    },
    {
      "epoch": 0.2863636363636364,
      "grad_norm": 0.7062106728553772,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.6552,
      "step": 42
    },
    {
      "epoch": 0.29318181818181815,
      "grad_norm": 1.2342087030410767,
      "learning_rate": 2.15e-06,
      "loss": 0.6959,
      "step": 43
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.8967597484588623,
      "learning_rate": 2.2e-06,
      "loss": 0.704,
      "step": 44
    },
    {
      "epoch": 0.3068181818181818,
      "grad_norm": 0.7228404879570007,
      "learning_rate": 2.25e-06,
      "loss": 0.6777,
      "step": 45
    },
    {
      "epoch": 0.31363636363636366,
      "grad_norm": 0.6731488704681396,
      "learning_rate": 2.3000000000000004e-06,
      "loss": 0.6652,
      "step": 46
    },
    {
      "epoch": 0.32045454545454544,
      "grad_norm": 0.71966952085495,
      "learning_rate": 2.35e-06,
      "loss": 0.7281,
      "step": 47
    },
    {
      "epoch": 0.32727272727272727,
      "grad_norm": 0.7057356238365173,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.6476,
      "step": 48
    },
    {
      "epoch": 0.3340909090909091,
      "grad_norm": 0.6346054077148438,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 0.6865,
      "step": 49
    },
    {
      "epoch": 0.3409090909090909,
      "grad_norm": 0.6938223838806152,
      "learning_rate": 2.5e-06,
      "loss": 0.6808,
      "step": 50
    },
    {
      "epoch": 0.3477272727272727,
      "grad_norm": 0.7425184845924377,
      "learning_rate": 2.55e-06,
      "loss": 0.7074,
      "step": 51
    },
    {
      "epoch": 0.35454545454545455,
      "grad_norm": 0.6743818521499634,
      "learning_rate": 2.6e-06,
      "loss": 0.657,
      "step": 52
    },
    {
      "epoch": 0.3613636363636364,
      "grad_norm": 0.6785942316055298,
      "learning_rate": 2.6500000000000005e-06,
      "loss": 0.6768,
      "step": 53
    },
    {
      "epoch": 0.36818181818181817,
      "grad_norm": 0.586974024772644,
      "learning_rate": 2.7000000000000004e-06,
      "loss": 0.651,
      "step": 54
    },
    {
      "epoch": 0.375,
      "grad_norm": 0.5727331042289734,
      "learning_rate": 2.7500000000000004e-06,
      "loss": 0.669,
      "step": 55
    },
    {
      "epoch": 0.38181818181818183,
      "grad_norm": 0.7333543300628662,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.6571,
      "step": 56
    },
    {
      "epoch": 0.3886363636363636,
      "grad_norm": 0.5710961818695068,
      "learning_rate": 2.85e-06,
      "loss": 0.6475,
      "step": 57
    },
    {
      "epoch": 0.39545454545454545,
      "grad_norm": 0.5614489912986755,
      "learning_rate": 2.9e-06,
      "loss": 0.6314,
      "step": 58
    },
    {
      "epoch": 0.4022727272727273,
      "grad_norm": 0.5007341504096985,
      "learning_rate": 2.95e-06,
      "loss": 0.6346,
      "step": 59
    },
    {
      "epoch": 0.4090909090909091,
      "grad_norm": 0.54221510887146,
      "learning_rate": 3e-06,
      "loss": 0.6715,
      "step": 60
    },
    {
      "epoch": 0.4159090909090909,
      "grad_norm": 0.5177039504051208,
      "learning_rate": 3.05e-06,
      "loss": 0.6459,
      "step": 61
    },
    {
      "epoch": 0.42272727272727273,
      "grad_norm": 0.4741189181804657,
      "learning_rate": 3.1000000000000004e-06,
      "loss": 0.592,
      "step": 62
    },
    {
      "epoch": 0.42954545454545456,
      "grad_norm": 0.5637328028678894,
      "learning_rate": 3.1500000000000003e-06,
      "loss": 0.6459,
      "step": 63
    },
    {
      "epoch": 0.43636363636363634,
      "grad_norm": 0.5478869676589966,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.6447,
      "step": 64
    },
    {
      "epoch": 0.4431818181818182,
      "grad_norm": 0.5483130216598511,
      "learning_rate": 3.2500000000000002e-06,
      "loss": 0.6319,
      "step": 65
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.478081613779068,
      "learning_rate": 3.3000000000000006e-06,
      "loss": 0.6102,
      "step": 66
    },
    {
      "epoch": 0.45681818181818185,
      "grad_norm": 0.4777645766735077,
      "learning_rate": 3.3500000000000005e-06,
      "loss": 0.5986,
      "step": 67
    },
    {
      "epoch": 0.4636363636363636,
      "grad_norm": 0.5291482210159302,
      "learning_rate": 3.4000000000000005e-06,
      "loss": 0.6314,
      "step": 68
    },
    {
      "epoch": 0.47045454545454546,
      "grad_norm": 0.5224051475524902,
      "learning_rate": 3.45e-06,
      "loss": 0.6278,
      "step": 69
    },
    {
      "epoch": 0.4772727272727273,
      "grad_norm": 0.6474127173423767,
      "learning_rate": 3.5e-06,
      "loss": 0.6398,
      "step": 70
    },
    {
      "epoch": 0.48409090909090907,
      "grad_norm": 0.5060178637504578,
      "learning_rate": 3.5500000000000003e-06,
      "loss": 0.5936,
      "step": 71
    },
    {
      "epoch": 0.4909090909090909,
      "grad_norm": 3.0769243240356445,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.5975,
      "step": 72
    },
    {
      "epoch": 0.49772727272727274,
      "grad_norm": 0.5319749116897583,
      "learning_rate": 3.65e-06,
      "loss": 0.6119,
      "step": 73
    },
    {
      "epoch": 0.5045454545454545,
      "grad_norm": 0.6745399236679077,
      "learning_rate": 3.7e-06,
      "loss": 0.6213,
      "step": 74
    },
    {
      "epoch": 0.5113636363636364,
      "grad_norm": 0.5156731009483337,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.6262,
      "step": 75
    },
    {
      "epoch": 0.5181818181818182,
      "grad_norm": 0.9138725399971008,
      "learning_rate": 3.8000000000000005e-06,
      "loss": 0.6244,
      "step": 76
    },
    {
      "epoch": 0.525,
      "grad_norm": 0.4362037777900696,
      "learning_rate": 3.85e-06,
      "loss": 0.5874,
      "step": 77
    },
    {
      "epoch": 0.5318181818181819,
      "grad_norm": 0.4674088954925537,
      "learning_rate": 3.900000000000001e-06,
      "loss": 0.615,
      "step": 78
    },
    {
      "epoch": 0.5386363636363637,
      "grad_norm": 0.4590819180011749,
      "learning_rate": 3.95e-06,
      "loss": 0.6003,
      "step": 79
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 0.5145031809806824,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.6079,
      "step": 80
    },
    {
      "epoch": 0.5522727272727272,
      "grad_norm": 0.46660348773002625,
      "learning_rate": 4.05e-06,
      "loss": 0.6174,
      "step": 81
    },
    {
      "epoch": 0.5590909090909091,
      "grad_norm": 0.48053789138793945,
      "learning_rate": 4.1e-06,
      "loss": 0.6232,
      "step": 82
    },
    {
      "epoch": 0.5659090909090909,
      "grad_norm": 0.540793776512146,
      "learning_rate": 4.15e-06,
      "loss": 0.619,
      "step": 83
    },
    {
      "epoch": 0.5727272727272728,
      "grad_norm": 0.4925222396850586,
      "learning_rate": 4.2000000000000004e-06,
      "loss": 0.5838,
      "step": 84
    },
    {
      "epoch": 0.5795454545454546,
      "grad_norm": 0.44989290833473206,
      "learning_rate": 4.25e-06,
      "loss": 0.5972,
      "step": 85
    },
    {
      "epoch": 0.5863636363636363,
      "grad_norm": 0.42691388726234436,
      "learning_rate": 4.3e-06,
      "loss": 0.6046,
      "step": 86
    },
    {
      "epoch": 0.5931818181818181,
      "grad_norm": 0.4512398838996887,
      "learning_rate": 4.350000000000001e-06,
      "loss": 0.5828,
      "step": 87
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.44399499893188477,
      "learning_rate": 4.4e-06,
      "loss": 0.6143,
      "step": 88
    },
    {
      "epoch": 0.6068181818181818,
      "grad_norm": 0.46058326959609985,
      "learning_rate": 4.450000000000001e-06,
      "loss": 0.6117,
      "step": 89
    },
    {
      "epoch": 0.6136363636363636,
      "grad_norm": 0.8795785903930664,
      "learning_rate": 4.5e-06,
      "loss": 0.6,
      "step": 90
    },
    {
      "epoch": 0.6204545454545455,
      "grad_norm": 0.5148798227310181,
      "learning_rate": 4.5500000000000005e-06,
      "loss": 0.6041,
      "step": 91
    },
    {
      "epoch": 0.6272727272727273,
      "grad_norm": 0.48829612135887146,
      "learning_rate": 4.600000000000001e-06,
      "loss": 0.5492,
      "step": 92
    },
    {
      "epoch": 0.634090909090909,
      "grad_norm": 0.5211894512176514,
      "learning_rate": 4.65e-06,
      "loss": 0.6044,
      "step": 93
    },
    {
      "epoch": 0.6409090909090909,
      "grad_norm": 0.44403275847435,
      "learning_rate": 4.7e-06,
      "loss": 0.5929,
      "step": 94
    },
    {
      "epoch": 0.6477272727272727,
      "grad_norm": 0.6037693619728088,
      "learning_rate": 4.75e-06,
      "loss": 0.6132,
      "step": 95
    },
    {
      "epoch": 0.6545454545454545,
      "grad_norm": 0.4381515085697174,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.5822,
      "step": 96
    },
    {
      "epoch": 0.6613636363636364,
      "grad_norm": 0.4997427761554718,
      "learning_rate": 4.85e-06,
      "loss": 0.593,
      "step": 97
    },
    {
      "epoch": 0.6681818181818182,
      "grad_norm": 0.440571665763855,
      "learning_rate": 4.9000000000000005e-06,
      "loss": 0.6072,
      "step": 98
    },
    {
      "epoch": 0.675,
      "grad_norm": 0.5020624995231628,
      "learning_rate": 4.95e-06,
      "loss": 0.5861,
      "step": 99
    },
    {
      "epoch": 0.6818181818181818,
      "grad_norm": 0.4280160963535309,
      "learning_rate": 5e-06,
      "loss": 0.6163,
      "step": 100
    },
    {
      "epoch": 0.6886363636363636,
      "grad_norm": 0.4584537148475647,
      "learning_rate": 4.9999795126530275e-06,
      "loss": 0.6177,
      "step": 101
    },
    {
      "epoch": 0.6954545454545454,
      "grad_norm": 0.6835049390792847,
      "learning_rate": 4.999918050947891e-06,
      "loss": 0.579,
      "step": 102
    },
    {
      "epoch": 0.7022727272727273,
      "grad_norm": 0.4551607072353363,
      "learning_rate": 4.999815615891943e-06,
      "loss": 0.5927,
      "step": 103
    },
    {
      "epoch": 0.7090909090909091,
      "grad_norm": 0.5893972516059875,
      "learning_rate": 4.9996722091640805e-06,
      "loss": 0.5775,
      "step": 104
    },
    {
      "epoch": 0.7159090909090909,
      "grad_norm": 0.4574092924594879,
      "learning_rate": 4.9994878331147225e-06,
      "loss": 0.5863,
      "step": 105
    },
    {
      "epoch": 0.7227272727272728,
      "grad_norm": 0.5354658365249634,
      "learning_rate": 4.99926249076577e-06,
      "loss": 0.5453,
      "step": 106
    },
    {
      "epoch": 0.7295454545454545,
      "grad_norm": 0.4347354471683502,
      "learning_rate": 4.998996185810557e-06,
      "loss": 0.5913,
      "step": 107
    },
    {
      "epoch": 0.7363636363636363,
      "grad_norm": 0.4487966299057007,
      "learning_rate": 4.998688922613788e-06,
      "loss": 0.5749,
      "step": 108
    },
    {
      "epoch": 0.7431818181818182,
      "grad_norm": 0.42477577924728394,
      "learning_rate": 4.9983407062114695e-06,
      "loss": 0.5761,
      "step": 109
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.4801446497440338,
      "learning_rate": 4.9979515423108255e-06,
      "loss": 0.5977,
      "step": 110
    },
    {
      "epoch": 0.7568181818181818,
      "grad_norm": 0.4876883029937744,
      "learning_rate": 4.997521437290205e-06,
      "loss": 0.5849,
      "step": 111
    },
    {
      "epoch": 0.7636363636363637,
      "grad_norm": 0.4328872263431549,
      "learning_rate": 4.997050398198977e-06,
      "loss": 0.5988,
      "step": 112
    },
    {
      "epoch": 0.7704545454545455,
      "grad_norm": 0.4332719147205353,
      "learning_rate": 4.996538432757414e-06,
      "loss": 0.6059,
      "step": 113
    },
    {
      "epoch": 0.7772727272727272,
      "grad_norm": 0.4455336630344391,
      "learning_rate": 4.995985549356568e-06,
      "loss": 0.5725,
      "step": 114
    },
    {
      "epoch": 0.7840909090909091,
      "grad_norm": 0.4197766184806824,
      "learning_rate": 4.995391757058129e-06,
      "loss": 0.5715,
      "step": 115
    },
    {
      "epoch": 0.7909090909090909,
      "grad_norm": 0.41945499181747437,
      "learning_rate": 4.99475706559428e-06,
      "loss": 0.5845,
      "step": 116
    },
    {
      "epoch": 0.7977272727272727,
      "grad_norm": 0.42414823174476624,
      "learning_rate": 4.994081485367537e-06,
      "loss": 0.5595,
      "step": 117
    },
    {
      "epoch": 0.8045454545454546,
      "grad_norm": 0.4056423008441925,
      "learning_rate": 4.993365027450576e-06,
      "loss": 0.582,
      "step": 118
    },
    {
      "epoch": 0.8113636363636364,
      "grad_norm": 0.4554205536842346,
      "learning_rate": 4.992607703586058e-06,
      "loss": 0.5591,
      "step": 119
    },
    {
      "epoch": 0.8181818181818182,
      "grad_norm": 0.4307089149951935,
      "learning_rate": 4.991809526186424e-06,
      "loss": 0.5927,
      "step": 120
    },
    {
      "epoch": 0.825,
      "grad_norm": 0.48133528232574463,
      "learning_rate": 4.990970508333707e-06,
      "loss": 0.5703,
      "step": 121
    },
    {
      "epoch": 0.8318181818181818,
      "grad_norm": 0.48582738637924194,
      "learning_rate": 4.990090663779305e-06,
      "loss": 0.5491,
      "step": 122
    },
    {
      "epoch": 0.8386363636363636,
      "grad_norm": 0.44926196336746216,
      "learning_rate": 4.9891700069437635e-06,
      "loss": 0.5803,
      "step": 123
    },
    {
      "epoch": 0.8454545454545455,
      "grad_norm": 0.4562165141105652,
      "learning_rate": 4.988208552916535e-06,
      "loss": 0.5945,
      "step": 124
    },
    {
      "epoch": 0.8522727272727273,
      "grad_norm": 0.5885360836982727,
      "learning_rate": 4.987206317455734e-06,
      "loss": 0.5632,
      "step": 125
    },
    {
      "epoch": 0.8590909090909091,
      "grad_norm": 0.4525550603866577,
      "learning_rate": 4.986163316987877e-06,
      "loss": 0.5221,
      "step": 126
    },
    {
      "epoch": 0.865909090909091,
      "grad_norm": 0.4545478820800781,
      "learning_rate": 4.985079568607613e-06,
      "loss": 0.587,
      "step": 127
    },
    {
      "epoch": 0.8727272727272727,
      "grad_norm": 0.4364396333694458,
      "learning_rate": 4.983955090077445e-06,
      "loss": 0.5626,
      "step": 128
    },
    {
      "epoch": 0.8795454545454545,
      "grad_norm": 0.4354369640350342,
      "learning_rate": 4.982789899827439e-06,
      "loss": 0.5674,
      "step": 129
    },
    {
      "epoch": 0.8863636363636364,
      "grad_norm": 0.4822773337364197,
      "learning_rate": 4.9815840169549216e-06,
      "loss": 0.5685,
      "step": 130
    },
    {
      "epoch": 0.8931818181818182,
      "grad_norm": 0.44673025608062744,
      "learning_rate": 4.980337461224164e-06,
      "loss": 0.5733,
      "step": 131
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.6294501423835754,
      "learning_rate": 4.979050253066064e-06,
      "loss": 0.5396,
      "step": 132
    },
    {
      "epoch": 0.9068181818181819,
      "grad_norm": 0.42890027165412903,
      "learning_rate": 4.977722413577802e-06,
      "loss": 0.5718,
      "step": 133
    },
    {
      "epoch": 0.9136363636363637,
      "grad_norm": 0.470587819814682,
      "learning_rate": 4.976353964522509e-06,
      "loss": 0.5705,
      "step": 134
    },
    {
      "epoch": 0.9204545454545454,
      "grad_norm": 0.4632768929004669,
      "learning_rate": 4.974944928328894e-06,
      "loss": 0.5394,
      "step": 135
    },
    {
      "epoch": 0.9272727272727272,
      "grad_norm": 0.5304691195487976,
      "learning_rate": 4.973495328090891e-06,
      "loss": 0.5762,
      "step": 136
    },
    {
      "epoch": 0.9340909090909091,
      "grad_norm": 0.42036354541778564,
      "learning_rate": 4.972005187567267e-06,
      "loss": 0.5649,
      "step": 137
    },
    {
      "epoch": 0.9409090909090909,
      "grad_norm": 0.45189663767814636,
      "learning_rate": 4.970474531181245e-06,
      "loss": 0.5554,
      "step": 138
    },
    {
      "epoch": 0.9477272727272728,
      "grad_norm": 0.47352468967437744,
      "learning_rate": 4.968903384020095e-06,
      "loss": 0.5511,
      "step": 139
    },
    {
      "epoch": 0.9545454545454546,
      "grad_norm": 0.525702953338623,
      "learning_rate": 4.967291771834727e-06,
      "loss": 0.5777,
      "step": 140
    },
    {
      "epoch": 0.9613636363636363,
      "grad_norm": 0.46146854758262634,
      "learning_rate": 4.965639721039267e-06,
      "loss": 0.5537,
      "step": 141
    },
    {
      "epoch": 0.9681818181818181,
      "grad_norm": 0.5200941562652588,
      "learning_rate": 4.963947258710626e-06,
      "loss": 0.5522,
      "step": 142
    },
    {
      "epoch": 0.975,
      "grad_norm": 0.5260375142097473,
      "learning_rate": 4.962214412588053e-06,
      "loss": 0.5747,
      "step": 143
    },
    {
      "epoch": 0.9818181818181818,
      "grad_norm": 0.518645703792572,
      "learning_rate": 4.960441211072686e-06,
      "loss": 0.5257,
      "step": 144
    },
    {
      "epoch": 0.9886363636363636,
      "grad_norm": 0.4528891444206238,
      "learning_rate": 4.9586276832270785e-06,
      "loss": 0.5548,
      "step": 145
    },
    {
      "epoch": 0.9954545454545455,
      "grad_norm": 0.5137557983398438,
      "learning_rate": 4.9567738587747314e-06,
      "loss": 0.5526,
      "step": 146
    }
  ],
  "logging_steps": 1,
  "max_steps": 876,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 146,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.39244756481029e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}