Invalid JSON: Unexpected token 'N', ..."ad_norm": NaN,
"... is not valid JSON
| { | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 5.0, | |
| "eval_steps": 500, | |
| "global_step": 2410, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.002074688796680498, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.319502074688797e-07, | |
| "loss": 0.8792, | |
| "step": 1 | |
| }, | |
| { | |
| "epoch": 0.004149377593360996, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.639004149377594e-07, | |
| "loss": 0.0, | |
| "step": 2 | |
| }, | |
| { | |
| "epoch": 0.006224066390041493, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.95850622406639e-07, | |
| "loss": 0.0, | |
| "step": 3 | |
| }, | |
| { | |
| "epoch": 0.008298755186721992, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3278008298755188e-06, | |
| "loss": 0.0, | |
| "step": 4 | |
| }, | |
| { | |
| "epoch": 0.01037344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6597510373443984e-06, | |
| "loss": 0.0, | |
| "step": 5 | |
| }, | |
| { | |
| "epoch": 0.012448132780082987, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.991701244813278e-06, | |
| "loss": 0.0, | |
| "step": 6 | |
| }, | |
| { | |
| "epoch": 0.014522821576763486, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.323651452282158e-06, | |
| "loss": 0.0, | |
| "step": 7 | |
| }, | |
| { | |
| "epoch": 0.016597510373443983, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6556016597510377e-06, | |
| "loss": 0.0, | |
| "step": 8 | |
| }, | |
| { | |
| "epoch": 0.01867219917012448, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9875518672199173e-06, | |
| "loss": 0.0, | |
| "step": 9 | |
| }, | |
| { | |
| "epoch": 0.02074688796680498, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.319502074688797e-06, | |
| "loss": 0.0, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.022821576763485476, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6514522821576765e-06, | |
| "loss": 0.0, | |
| "step": 11 | |
| }, | |
| { | |
| "epoch": 0.024896265560165973, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.983402489626556e-06, | |
| "loss": 0.0, | |
| "step": 12 | |
| }, | |
| { | |
| "epoch": 0.026970954356846474, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.315352697095436e-06, | |
| "loss": 0.0, | |
| "step": 13 | |
| }, | |
| { | |
| "epoch": 0.029045643153526972, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.647302904564316e-06, | |
| "loss": 0.0, | |
| "step": 14 | |
| }, | |
| { | |
| "epoch": 0.03112033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.979253112033195e-06, | |
| "loss": 0.0, | |
| "step": 15 | |
| }, | |
| { | |
| "epoch": 0.03319502074688797, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.311203319502075e-06, | |
| "loss": 0.0, | |
| "step": 16 | |
| }, | |
| { | |
| "epoch": 0.035269709543568464, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6431535269709545e-06, | |
| "loss": 0.0, | |
| "step": 17 | |
| }, | |
| { | |
| "epoch": 0.03734439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.9751037344398345e-06, | |
| "loss": 0.0, | |
| "step": 18 | |
| }, | |
| { | |
| "epoch": 0.03941908713692946, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.307053941908714e-06, | |
| "loss": 0.0, | |
| "step": 19 | |
| }, | |
| { | |
| "epoch": 0.04149377593360996, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.639004149377594e-06, | |
| "loss": 0.0, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.043568464730290454, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.970954356846473e-06, | |
| "loss": 0.0, | |
| "step": 21 | |
| }, | |
| { | |
| "epoch": 0.04564315352697095, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.302904564315353e-06, | |
| "loss": 0.0, | |
| "step": 22 | |
| }, | |
| { | |
| "epoch": 0.04771784232365145, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.634854771784232e-06, | |
| "loss": 0.0, | |
| "step": 23 | |
| }, | |
| { | |
| "epoch": 0.04979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.966804979253112e-06, | |
| "loss": 0.0, | |
| "step": 24 | |
| }, | |
| { | |
| "epoch": 0.05186721991701245, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.298755186721992e-06, | |
| "loss": 0.0, | |
| "step": 25 | |
| }, | |
| { | |
| "epoch": 0.05394190871369295, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.630705394190872e-06, | |
| "loss": 0.0, | |
| "step": 26 | |
| }, | |
| { | |
| "epoch": 0.056016597510373446, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.962655601659752e-06, | |
| "loss": 0.0, | |
| "step": 27 | |
| }, | |
| { | |
| "epoch": 0.058091286307053944, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.294605809128632e-06, | |
| "loss": 0.0, | |
| "step": 28 | |
| }, | |
| { | |
| "epoch": 0.06016597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.62655601659751e-06, | |
| "loss": 0.0, | |
| "step": 29 | |
| }, | |
| { | |
| "epoch": 0.06224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.95850622406639e-06, | |
| "loss": 0.0, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.06431535269709543, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0290456431535269e-05, | |
| "loss": 0.0, | |
| "step": 31 | |
| }, | |
| { | |
| "epoch": 0.06639004149377593, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.062240663900415e-05, | |
| "loss": 0.0, | |
| "step": 32 | |
| }, | |
| { | |
| "epoch": 0.06846473029045644, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.095435684647303e-05, | |
| "loss": 0.0, | |
| "step": 33 | |
| }, | |
| { | |
| "epoch": 0.07053941908713693, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1286307053941909e-05, | |
| "loss": 0.0, | |
| "step": 34 | |
| }, | |
| { | |
| "epoch": 0.07261410788381743, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.161825726141079e-05, | |
| "loss": 0.0, | |
| "step": 35 | |
| }, | |
| { | |
| "epoch": 0.07468879668049792, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1950207468879669e-05, | |
| "loss": 0.0, | |
| "step": 36 | |
| }, | |
| { | |
| "epoch": 0.07676348547717843, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2282157676348549e-05, | |
| "loss": 0.0, | |
| "step": 37 | |
| }, | |
| { | |
| "epoch": 0.07883817427385892, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2614107883817427e-05, | |
| "loss": 0.0, | |
| "step": 38 | |
| }, | |
| { | |
| "epoch": 0.08091286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2946058091286309e-05, | |
| "loss": 0.0, | |
| "step": 39 | |
| }, | |
| { | |
| "epoch": 0.08298755186721991, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3278008298755187e-05, | |
| "loss": 0.0, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.08506224066390042, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3609958506224067e-05, | |
| "loss": 0.0, | |
| "step": 41 | |
| }, | |
| { | |
| "epoch": 0.08713692946058091, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3941908713692946e-05, | |
| "loss": 0.0, | |
| "step": 42 | |
| }, | |
| { | |
| "epoch": 0.08921161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4273858921161828e-05, | |
| "loss": 0.0, | |
| "step": 43 | |
| }, | |
| { | |
| "epoch": 0.0912863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4605809128630706e-05, | |
| "loss": 0.0, | |
| "step": 44 | |
| }, | |
| { | |
| "epoch": 0.09336099585062241, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4937759336099586e-05, | |
| "loss": 0.0, | |
| "step": 45 | |
| }, | |
| { | |
| "epoch": 0.0954356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5269709543568464e-05, | |
| "loss": 0.0, | |
| "step": 46 | |
| }, | |
| { | |
| "epoch": 0.0975103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5601659751037346e-05, | |
| "loss": 0.0, | |
| "step": 47 | |
| }, | |
| { | |
| "epoch": 0.0995850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5933609958506224e-05, | |
| "loss": 0.0, | |
| "step": 48 | |
| }, | |
| { | |
| "epoch": 0.1016597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6265560165975106e-05, | |
| "loss": 0.0, | |
| "step": 49 | |
| }, | |
| { | |
| "epoch": 0.1037344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6597510373443984e-05, | |
| "loss": 0.0, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.10580912863070539, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6929460580912863e-05, | |
| "loss": 0.0, | |
| "step": 51 | |
| }, | |
| { | |
| "epoch": 0.1078838174273859, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7261410788381744e-05, | |
| "loss": 0.0, | |
| "step": 52 | |
| }, | |
| { | |
| "epoch": 0.10995850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7593360995850623e-05, | |
| "loss": 0.0, | |
| "step": 53 | |
| }, | |
| { | |
| "epoch": 0.11203319502074689, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7925311203319504e-05, | |
| "loss": 0.0, | |
| "step": 54 | |
| }, | |
| { | |
| "epoch": 0.11410788381742738, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8257261410788383e-05, | |
| "loss": 0.0, | |
| "step": 55 | |
| }, | |
| { | |
| "epoch": 0.11618257261410789, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8589211618257264e-05, | |
| "loss": 0.0, | |
| "step": 56 | |
| }, | |
| { | |
| "epoch": 0.11825726141078838, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8921161825726143e-05, | |
| "loss": 0.0, | |
| "step": 57 | |
| }, | |
| { | |
| "epoch": 0.12033195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.925311203319502e-05, | |
| "loss": 0.0, | |
| "step": 58 | |
| }, | |
| { | |
| "epoch": 0.12240663900414937, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9585062240663903e-05, | |
| "loss": 0.0, | |
| "step": 59 | |
| }, | |
| { | |
| "epoch": 0.12448132780082988, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.991701244813278e-05, | |
| "loss": 0.0, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.12655601659751037, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.024896265560166e-05, | |
| "loss": 0.0, | |
| "step": 61 | |
| }, | |
| { | |
| "epoch": 0.12863070539419086, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0580912863070538e-05, | |
| "loss": 0.0, | |
| "step": 62 | |
| }, | |
| { | |
| "epoch": 0.13070539419087138, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0912863070539423e-05, | |
| "loss": 0.0, | |
| "step": 63 | |
| }, | |
| { | |
| "epoch": 0.13278008298755187, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.12448132780083e-05, | |
| "loss": 0.0, | |
| "step": 64 | |
| }, | |
| { | |
| "epoch": 0.13485477178423236, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.157676348547718e-05, | |
| "loss": 0.0, | |
| "step": 65 | |
| }, | |
| { | |
| "epoch": 0.13692946058091288, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.190871369294606e-05, | |
| "loss": 0.0, | |
| "step": 66 | |
| }, | |
| { | |
| "epoch": 0.13900414937759337, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.224066390041494e-05, | |
| "loss": 0.0, | |
| "step": 67 | |
| }, | |
| { | |
| "epoch": 0.14107883817427386, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2572614107883818e-05, | |
| "loss": 0.0, | |
| "step": 68 | |
| }, | |
| { | |
| "epoch": 0.14315352697095435, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2904564315352696e-05, | |
| "loss": 0.0, | |
| "step": 69 | |
| }, | |
| { | |
| "epoch": 0.14522821576763487, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.323651452282158e-05, | |
| "loss": 0.0, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.14730290456431536, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.356846473029046e-05, | |
| "loss": 0.0, | |
| "step": 71 | |
| }, | |
| { | |
| "epoch": 0.14937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3900414937759338e-05, | |
| "loss": 0.0, | |
| "step": 72 | |
| }, | |
| { | |
| "epoch": 0.15145228215767634, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4232365145228216e-05, | |
| "loss": 0.0, | |
| "step": 73 | |
| }, | |
| { | |
| "epoch": 0.15352697095435686, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4564315352697098e-05, | |
| "loss": 0.0, | |
| "step": 74 | |
| }, | |
| { | |
| "epoch": 0.15560165975103735, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4896265560165977e-05, | |
| "loss": 0.0, | |
| "step": 75 | |
| }, | |
| { | |
| "epoch": 0.15767634854771784, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5228215767634855e-05, | |
| "loss": 0.0, | |
| "step": 76 | |
| }, | |
| { | |
| "epoch": 0.15975103734439833, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5560165975103733e-05, | |
| "loss": 0.0, | |
| "step": 77 | |
| }, | |
| { | |
| "epoch": 0.16182572614107885, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5892116182572618e-05, | |
| "loss": 0.0, | |
| "step": 78 | |
| }, | |
| { | |
| "epoch": 0.16390041493775934, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6224066390041497e-05, | |
| "loss": 0.0, | |
| "step": 79 | |
| }, | |
| { | |
| "epoch": 0.16597510373443983, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6556016597510375e-05, | |
| "loss": 0.0, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.16804979253112035, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6887966804979257e-05, | |
| "loss": 0.0, | |
| "step": 81 | |
| }, | |
| { | |
| "epoch": 0.17012448132780084, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7219917012448135e-05, | |
| "loss": 0.0, | |
| "step": 82 | |
| }, | |
| { | |
| "epoch": 0.17219917012448133, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7551867219917013e-05, | |
| "loss": 0.0, | |
| "step": 83 | |
| }, | |
| { | |
| "epoch": 0.17427385892116182, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.788381742738589e-05, | |
| "loss": 0.0, | |
| "step": 84 | |
| }, | |
| { | |
| "epoch": 0.17634854771784234, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8215767634854777e-05, | |
| "loss": 0.0, | |
| "step": 85 | |
| }, | |
| { | |
| "epoch": 0.17842323651452283, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8547717842323655e-05, | |
| "loss": 0.0, | |
| "step": 86 | |
| }, | |
| { | |
| "epoch": 0.18049792531120332, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8879668049792533e-05, | |
| "loss": 0.0, | |
| "step": 87 | |
| }, | |
| { | |
| "epoch": 0.1825726141078838, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9211618257261412e-05, | |
| "loss": 0.0, | |
| "step": 88 | |
| }, | |
| { | |
| "epoch": 0.18464730290456433, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9543568464730293e-05, | |
| "loss": 0.0, | |
| "step": 89 | |
| }, | |
| { | |
| "epoch": 0.18672199170124482, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9875518672199172e-05, | |
| "loss": 0.0, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.1887966804979253, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.020746887966805e-05, | |
| "loss": 0.0, | |
| "step": 91 | |
| }, | |
| { | |
| "epoch": 0.1908713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.053941908713693e-05, | |
| "loss": 0.0, | |
| "step": 92 | |
| }, | |
| { | |
| "epoch": 0.19294605809128632, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.087136929460581e-05, | |
| "loss": 0.0, | |
| "step": 93 | |
| }, | |
| { | |
| "epoch": 0.1950207468879668, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.120331950207469e-05, | |
| "loss": 0.0, | |
| "step": 94 | |
| }, | |
| { | |
| "epoch": 0.1970954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.153526970954357e-05, | |
| "loss": 0.0, | |
| "step": 95 | |
| }, | |
| { | |
| "epoch": 0.1991701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.186721991701245e-05, | |
| "loss": 0.0, | |
| "step": 96 | |
| }, | |
| { | |
| "epoch": 0.2012448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.219917012448133e-05, | |
| "loss": 0.0, | |
| "step": 97 | |
| }, | |
| { | |
| "epoch": 0.2033195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.253112033195021e-05, | |
| "loss": 0.0, | |
| "step": 98 | |
| }, | |
| { | |
| "epoch": 0.2053941908713693, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.286307053941909e-05, | |
| "loss": 0.0, | |
| "step": 99 | |
| }, | |
| { | |
| "epoch": 0.2074688796680498, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.319502074688797e-05, | |
| "loss": 0.0, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.2095435684647303, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.352697095435685e-05, | |
| "loss": 0.0, | |
| "step": 101 | |
| }, | |
| { | |
| "epoch": 0.21161825726141079, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3858921161825725e-05, | |
| "loss": 0.0, | |
| "step": 102 | |
| }, | |
| { | |
| "epoch": 0.21369294605809128, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.419087136929461e-05, | |
| "loss": 0.0, | |
| "step": 103 | |
| }, | |
| { | |
| "epoch": 0.2157676348547718, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.452282157676349e-05, | |
| "loss": 0.0, | |
| "step": 104 | |
| }, | |
| { | |
| "epoch": 0.21784232365145229, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.485477178423237e-05, | |
| "loss": 0.0, | |
| "step": 105 | |
| }, | |
| { | |
| "epoch": 0.21991701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5186721991701245e-05, | |
| "loss": 0.0, | |
| "step": 106 | |
| }, | |
| { | |
| "epoch": 0.22199170124481327, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.551867219917013e-05, | |
| "loss": 0.0, | |
| "step": 107 | |
| }, | |
| { | |
| "epoch": 0.22406639004149378, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.585062240663901e-05, | |
| "loss": 0.0, | |
| "step": 108 | |
| }, | |
| { | |
| "epoch": 0.22614107883817428, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6182572614107884e-05, | |
| "loss": 0.0, | |
| "step": 109 | |
| }, | |
| { | |
| "epoch": 0.22821576763485477, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6514522821576766e-05, | |
| "loss": 0.0, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.23029045643153526, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.684647302904565e-05, | |
| "loss": 0.0, | |
| "step": 111 | |
| }, | |
| { | |
| "epoch": 0.23236514522821577, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.717842323651453e-05, | |
| "loss": 0.0, | |
| "step": 112 | |
| }, | |
| { | |
| "epoch": 0.23443983402489627, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7510373443983404e-05, | |
| "loss": 0.0, | |
| "step": 113 | |
| }, | |
| { | |
| "epoch": 0.23651452282157676, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7842323651452286e-05, | |
| "loss": 0.0, | |
| "step": 114 | |
| }, | |
| { | |
| "epoch": 0.23858921161825727, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.817427385892117e-05, | |
| "loss": 0.0, | |
| "step": 115 | |
| }, | |
| { | |
| "epoch": 0.24066390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.850622406639004e-05, | |
| "loss": 0.0, | |
| "step": 116 | |
| }, | |
| { | |
| "epoch": 0.24273858921161826, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.8838174273858924e-05, | |
| "loss": 0.0, | |
| "step": 117 | |
| }, | |
| { | |
| "epoch": 0.24481327800829875, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.9170124481327806e-05, | |
| "loss": 0.0, | |
| "step": 118 | |
| }, | |
| { | |
| "epoch": 0.24688796680497926, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.950207468879669e-05, | |
| "loss": 0.0, | |
| "step": 119 | |
| }, | |
| { | |
| "epoch": 0.24896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.983402489626556e-05, | |
| "loss": 0.0, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.25103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.0165975103734444e-05, | |
| "loss": 0.0, | |
| "step": 121 | |
| }, | |
| { | |
| "epoch": 0.25311203319502074, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.049792531120332e-05, | |
| "loss": 0.0, | |
| "step": 122 | |
| }, | |
| { | |
| "epoch": 0.2551867219917012, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.08298755186722e-05, | |
| "loss": 0.0, | |
| "step": 123 | |
| }, | |
| { | |
| "epoch": 0.2572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1161825726141076e-05, | |
| "loss": 0.0, | |
| "step": 124 | |
| }, | |
| { | |
| "epoch": 0.25933609958506226, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1493775933609964e-05, | |
| "loss": 0.0, | |
| "step": 125 | |
| }, | |
| { | |
| "epoch": 0.26141078838174275, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1825726141078846e-05, | |
| "loss": 0.0, | |
| "step": 126 | |
| }, | |
| { | |
| "epoch": 0.26348547717842324, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.215767634854772e-05, | |
| "loss": 0.0, | |
| "step": 127 | |
| }, | |
| { | |
| "epoch": 0.26556016597510373, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.24896265560166e-05, | |
| "loss": 0.0, | |
| "step": 128 | |
| }, | |
| { | |
| "epoch": 0.2676348547717842, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.282157676348548e-05, | |
| "loss": 0.0, | |
| "step": 129 | |
| }, | |
| { | |
| "epoch": 0.2697095435684647, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.315352697095436e-05, | |
| "loss": 0.0, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.2717842323651452, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.3485477178423234e-05, | |
| "loss": 0.0, | |
| "step": 131 | |
| }, | |
| { | |
| "epoch": 0.27385892116182575, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.381742738589212e-05, | |
| "loss": 0.0, | |
| "step": 132 | |
| }, | |
| { | |
| "epoch": 0.27593360995850624, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.4149377593361004e-05, | |
| "loss": 0.0, | |
| "step": 133 | |
| }, | |
| { | |
| "epoch": 0.27800829875518673, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.448132780082988e-05, | |
| "loss": 0.0, | |
| "step": 134 | |
| }, | |
| { | |
| "epoch": 0.2800829875518672, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.481327800829876e-05, | |
| "loss": 0.0, | |
| "step": 135 | |
| }, | |
| { | |
| "epoch": 0.2821576763485477, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.5145228215767636e-05, | |
| "loss": 0.0, | |
| "step": 136 | |
| }, | |
| { | |
| "epoch": 0.2842323651452282, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.547717842323652e-05, | |
| "loss": 0.0, | |
| "step": 137 | |
| }, | |
| { | |
| "epoch": 0.2863070539419087, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.580912863070539e-05, | |
| "loss": 0.0, | |
| "step": 138 | |
| }, | |
| { | |
| "epoch": 0.2883817427385892, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.6141078838174274e-05, | |
| "loss": 0.0, | |
| "step": 139 | |
| }, | |
| { | |
| "epoch": 0.29045643153526973, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.647302904564316e-05, | |
| "loss": 0.0, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.2925311203319502, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.680497925311204e-05, | |
| "loss": 0.0, | |
| "step": 141 | |
| }, | |
| { | |
| "epoch": 0.2946058091286307, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.713692946058092e-05, | |
| "loss": 0.0, | |
| "step": 142 | |
| }, | |
| { | |
| "epoch": 0.2966804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.7468879668049795e-05, | |
| "loss": 0.0, | |
| "step": 143 | |
| }, | |
| { | |
| "epoch": 0.2987551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.7800829875518676e-05, | |
| "loss": 0.0, | |
| "step": 144 | |
| }, | |
| { | |
| "epoch": 0.3008298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.813278008298755e-05, | |
| "loss": 0.0, | |
| "step": 145 | |
| }, | |
| { | |
| "epoch": 0.3029045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.846473029045643e-05, | |
| "loss": 0.0, | |
| "step": 146 | |
| }, | |
| { | |
| "epoch": 0.3049792531120332, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.879668049792532e-05, | |
| "loss": 0.0, | |
| "step": 147 | |
| }, | |
| { | |
| "epoch": 0.3070539419087137, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.9128630705394196e-05, | |
| "loss": 0.0, | |
| "step": 148 | |
| }, | |
| { | |
| "epoch": 0.3091286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.946058091286308e-05, | |
| "loss": 0.0, | |
| "step": 149 | |
| }, | |
| { | |
| "epoch": 0.3112033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.979253112033195e-05, | |
| "loss": 0.0, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.3132780082987552, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.0124481327800835e-05, | |
| "loss": 0.0, | |
| "step": 151 | |
| }, | |
| { | |
| "epoch": 0.3153526970954357, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.045643153526971e-05, | |
| "loss": 0.0, | |
| "step": 152 | |
| }, | |
| { | |
| "epoch": 0.31742738589211617, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.078838174273859e-05, | |
| "loss": 0.0, | |
| "step": 153 | |
| }, | |
| { | |
| "epoch": 0.31950207468879666, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.1120331950207466e-05, | |
| "loss": 0.0, | |
| "step": 154 | |
| }, | |
| { | |
| "epoch": 0.3215767634854772, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.1452282157676355e-05, | |
| "loss": 0.0, | |
| "step": 155 | |
| }, | |
| { | |
| "epoch": 0.3236514522821577, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.1784232365145237e-05, | |
| "loss": 0.0, | |
| "step": 156 | |
| }, | |
| { | |
| "epoch": 0.3257261410788382, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.211618257261411e-05, | |
| "loss": 0.0, | |
| "step": 157 | |
| }, | |
| { | |
| "epoch": 0.3278008298755187, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.244813278008299e-05, | |
| "loss": 0.0, | |
| "step": 158 | |
| }, | |
| { | |
| "epoch": 0.32987551867219916, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.278008298755187e-05, | |
| "loss": 0.0, | |
| "step": 159 | |
| }, | |
| { | |
| "epoch": 0.33195020746887965, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.311203319502075e-05, | |
| "loss": 0.0, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.33402489626556015, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.3443983402489625e-05, | |
| "loss": 0.0, | |
| "step": 161 | |
| }, | |
| { | |
| "epoch": 0.3360995850622407, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.377593360995851e-05, | |
| "loss": 0.0, | |
| "step": 162 | |
| }, | |
| { | |
| "epoch": 0.3381742738589212, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4107883817427395e-05, | |
| "loss": 0.0, | |
| "step": 163 | |
| }, | |
| { | |
| "epoch": 0.34024896265560167, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.443983402489627e-05, | |
| "loss": 0.0, | |
| "step": 164 | |
| }, | |
| { | |
| "epoch": 0.34232365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.477178423236515e-05, | |
| "loss": 0.0, | |
| "step": 165 | |
| }, | |
| { | |
| "epoch": 0.34439834024896265, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.510373443983403e-05, | |
| "loss": 0.0, | |
| "step": 166 | |
| }, | |
| { | |
| "epoch": 0.34647302904564314, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.543568464730291e-05, | |
| "loss": 0.0, | |
| "step": 167 | |
| }, | |
| { | |
| "epoch": 0.34854771784232363, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.576763485477178e-05, | |
| "loss": 0.0, | |
| "step": 168 | |
| }, | |
| { | |
| "epoch": 0.3506224066390041, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6099585062240665e-05, | |
| "loss": 0.0, | |
| "step": 169 | |
| }, | |
| { | |
| "epoch": 0.35269709543568467, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6431535269709554e-05, | |
| "loss": 0.0, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.35477178423236516, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.676348547717843e-05, | |
| "loss": 0.0, | |
| "step": 171 | |
| }, | |
| { | |
| "epoch": 0.35684647302904565, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.709543568464731e-05, | |
| "loss": 0.0, | |
| "step": 172 | |
| }, | |
| { | |
| "epoch": 0.35892116182572614, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.7427385892116185e-05, | |
| "loss": 0.0, | |
| "step": 173 | |
| }, | |
| { | |
| "epoch": 0.36099585062240663, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.775933609958507e-05, | |
| "loss": 0.0, | |
| "step": 174 | |
| }, | |
| { | |
| "epoch": 0.3630705394190871, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.809128630705394e-05, | |
| "loss": 0.0, | |
| "step": 175 | |
| }, | |
| { | |
| "epoch": 0.3651452282157676, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.8423236514522824e-05, | |
| "loss": 0.0, | |
| "step": 176 | |
| }, | |
| { | |
| "epoch": 0.36721991701244816, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.875518672199171e-05, | |
| "loss": 0.0, | |
| "step": 177 | |
| }, | |
| { | |
| "epoch": 0.36929460580912865, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.908713692946059e-05, | |
| "loss": 0.0, | |
| "step": 178 | |
| }, | |
| { | |
| "epoch": 0.37136929460580914, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.941908713692947e-05, | |
| "loss": 0.0, | |
| "step": 179 | |
| }, | |
| { | |
| "epoch": 0.37344398340248963, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.9751037344398344e-05, | |
| "loss": 0.0, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.3755186721991701, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.0082987551867225e-05, | |
| "loss": 0.0, | |
| "step": 181 | |
| }, | |
| { | |
| "epoch": 0.3775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.04149377593361e-05, | |
| "loss": 0.0, | |
| "step": 182 | |
| }, | |
| { | |
| "epoch": 0.3796680497925311, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.074688796680498e-05, | |
| "loss": 0.0, | |
| "step": 183 | |
| }, | |
| { | |
| "epoch": 0.3817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.107883817427386e-05, | |
| "loss": 0.0, | |
| "step": 184 | |
| }, | |
| { | |
| "epoch": 0.38381742738589214, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.141078838174275e-05, | |
| "loss": 0.0, | |
| "step": 185 | |
| }, | |
| { | |
| "epoch": 0.38589211618257263, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.174273858921162e-05, | |
| "loss": 0.0, | |
| "step": 186 | |
| }, | |
| { | |
| "epoch": 0.3879668049792531, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.20746887966805e-05, | |
| "loss": 0.0, | |
| "step": 187 | |
| }, | |
| { | |
| "epoch": 0.3900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.240663900414938e-05, | |
| "loss": 0.0, | |
| "step": 188 | |
| }, | |
| { | |
| "epoch": 0.3921161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.273858921161827e-05, | |
| "loss": 0.0, | |
| "step": 189 | |
| }, | |
| { | |
| "epoch": 0.3941908713692946, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.307053941908713e-05, | |
| "loss": 0.0, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.3962655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.340248962655602e-05, | |
| "loss": 0.0, | |
| "step": 191 | |
| }, | |
| { | |
| "epoch": 0.3983402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.37344398340249e-05, | |
| "loss": 0.0, | |
| "step": 192 | |
| }, | |
| { | |
| "epoch": 0.4004149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.406639004149378e-05, | |
| "loss": 0.0, | |
| "step": 193 | |
| }, | |
| { | |
| "epoch": 0.4024896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.439834024896266e-05, | |
| "loss": 0.0, | |
| "step": 194 | |
| }, | |
| { | |
| "epoch": 0.4045643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.473029045643154e-05, | |
| "loss": 0.0, | |
| "step": 195 | |
| }, | |
| { | |
| "epoch": 0.4066390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.506224066390042e-05, | |
| "loss": 0.0, | |
| "step": 196 | |
| }, | |
| { | |
| "epoch": 0.4087136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.539419087136929e-05, | |
| "loss": 0.0, | |
| "step": 197 | |
| }, | |
| { | |
| "epoch": 0.4107883817427386, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.572614107883817e-05, | |
| "loss": 0.0, | |
| "step": 198 | |
| }, | |
| { | |
| "epoch": 0.41286307053941906, | |
| "grad_norm": null, | |
| "learning_rate": 6.605809128630706e-05, | |
| "loss": 0.0, | |
| "step": 199 | |
| }, | |
| { | |
| "epoch": 0.4149377593360996, | |
| "grad_norm": null, | |
| "learning_rate": 6.639004149377594e-05, | |
| "loss": 0.0, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.4170124481327801, | |
| "grad_norm": null, | |
| "learning_rate": 6.672199170124482e-05, | |
| "loss": 0.0, | |
| "step": 201 | |
| }, | |
| { | |
| "epoch": 0.4190871369294606, | |
| "grad_norm": null, | |
| "learning_rate": 6.70539419087137e-05, | |
| "loss": 0.0, | |
| "step": 202 | |
| }, | |
| { | |
| "epoch": 0.4211618257261411, | |
| "grad_norm": null, | |
| "learning_rate": 6.738589211618258e-05, | |
| "loss": 0.0, | |
| "step": 203 | |
| }, | |
| { | |
| "epoch": 0.42323651452282157, | |
| "grad_norm": null, | |
| "learning_rate": 6.771784232365145e-05, | |
| "loss": 0.0, | |
| "step": 204 | |
| }, | |
| { | |
| "epoch": 0.42531120331950206, | |
| "grad_norm": null, | |
| "learning_rate": 6.804979253112033e-05, | |
| "loss": 0.0, | |
| "step": 205 | |
| }, | |
| { | |
| "epoch": 0.42738589211618255, | |
| "grad_norm": null, | |
| "learning_rate": 6.838174273858921e-05, | |
| "loss": 0.0, | |
| "step": 206 | |
| }, | |
| { | |
| "epoch": 0.42946058091286304, | |
| "grad_norm": null, | |
| "learning_rate": 6.87136929460581e-05, | |
| "loss": 0.0, | |
| "step": 207 | |
| }, | |
| { | |
| "epoch": 0.4315352697095436, | |
| "grad_norm": null, | |
| "learning_rate": 6.904564315352698e-05, | |
| "loss": 0.0, | |
| "step": 208 | |
| }, | |
| { | |
| "epoch": 0.4336099585062241, | |
| "grad_norm": null, | |
| "learning_rate": 6.937759336099586e-05, | |
| "loss": 0.0, | |
| "step": 209 | |
| }, | |
| { | |
| "epoch": 0.43568464730290457, | |
| "grad_norm": null, | |
| "learning_rate": 6.970954356846474e-05, | |
| "loss": 0.0, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.43775933609958506, | |
| "grad_norm": null, | |
| "learning_rate": 7.004149377593361e-05, | |
| "loss": 0.0, | |
| "step": 211 | |
| }, | |
| { | |
| "epoch": 0.43983402489626555, | |
| "grad_norm": null, | |
| "learning_rate": 7.037344398340249e-05, | |
| "loss": 0.0, | |
| "step": 212 | |
| }, | |
| { | |
| "epoch": 0.44190871369294604, | |
| "grad_norm": null, | |
| "learning_rate": 7.070539419087137e-05, | |
| "loss": 0.0, | |
| "step": 213 | |
| }, | |
| { | |
| "epoch": 0.44398340248962653, | |
| "grad_norm": null, | |
| "learning_rate": 7.103734439834025e-05, | |
| "loss": 0.0, | |
| "step": 214 | |
| }, | |
| { | |
| "epoch": 0.4460580912863071, | |
| "grad_norm": null, | |
| "learning_rate": 7.136929460580914e-05, | |
| "loss": 0.0, | |
| "step": 215 | |
| }, | |
| { | |
| "epoch": 0.44813278008298757, | |
| "grad_norm": null, | |
| "learning_rate": 7.170124481327802e-05, | |
| "loss": 0.0, | |
| "step": 216 | |
| }, | |
| { | |
| "epoch": 0.45020746887966806, | |
| "grad_norm": null, | |
| "learning_rate": 7.20331950207469e-05, | |
| "loss": 0.0, | |
| "step": 217 | |
| }, | |
| { | |
| "epoch": 0.45228215767634855, | |
| "grad_norm": null, | |
| "learning_rate": 7.236514522821577e-05, | |
| "loss": 0.0, | |
| "step": 218 | |
| }, | |
| { | |
| "epoch": 0.45435684647302904, | |
| "grad_norm": null, | |
| "learning_rate": 7.269709543568465e-05, | |
| "loss": 0.0, | |
| "step": 219 | |
| }, | |
| { | |
| "epoch": 0.45643153526970953, | |
| "grad_norm": null, | |
| "learning_rate": 7.302904564315353e-05, | |
| "loss": 0.0, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.45850622406639, | |
| "grad_norm": null, | |
| "learning_rate": 7.336099585062241e-05, | |
| "loss": 0.0, | |
| "step": 221 | |
| }, | |
| { | |
| "epoch": 0.4605809128630705, | |
| "grad_norm": null, | |
| "learning_rate": 7.36929460580913e-05, | |
| "loss": 0.0, | |
| "step": 222 | |
| }, | |
| { | |
| "epoch": 0.46265560165975106, | |
| "grad_norm": null, | |
| "learning_rate": 7.402489626556018e-05, | |
| "loss": 0.0, | |
| "step": 223 | |
| }, | |
| { | |
| "epoch": 0.46473029045643155, | |
| "grad_norm": null, | |
| "learning_rate": 7.435684647302906e-05, | |
| "loss": 0.0, | |
| "step": 224 | |
| }, | |
| { | |
| "epoch": 0.46680497925311204, | |
| "grad_norm": null, | |
| "learning_rate": 7.468879668049793e-05, | |
| "loss": 0.0, | |
| "step": 225 | |
| }, | |
| { | |
| "epoch": 0.46887966804979253, | |
| "grad_norm": null, | |
| "learning_rate": 7.502074688796681e-05, | |
| "loss": 0.0, | |
| "step": 226 | |
| }, | |
| { | |
| "epoch": 0.470954356846473, | |
| "grad_norm": null, | |
| "learning_rate": 7.535269709543569e-05, | |
| "loss": 0.0, | |
| "step": 227 | |
| }, | |
| { | |
| "epoch": 0.4730290456431535, | |
| "grad_norm": null, | |
| "learning_rate": 7.568464730290457e-05, | |
| "loss": 0.0, | |
| "step": 228 | |
| }, | |
| { | |
| "epoch": 0.475103734439834, | |
| "grad_norm": null, | |
| "learning_rate": 7.601659751037345e-05, | |
| "loss": 0.0, | |
| "step": 229 | |
| }, | |
| { | |
| "epoch": 0.47717842323651455, | |
| "grad_norm": null, | |
| "learning_rate": 7.634854771784233e-05, | |
| "loss": 0.0, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.47925311203319504, | |
| "grad_norm": null, | |
| "learning_rate": 7.668049792531122e-05, | |
| "loss": 0.0, | |
| "step": 231 | |
| }, | |
| { | |
| "epoch": 0.48132780082987553, | |
| "grad_norm": null, | |
| "learning_rate": 7.701244813278008e-05, | |
| "loss": 0.0, | |
| "step": 232 | |
| }, | |
| { | |
| "epoch": 0.483402489626556, | |
| "grad_norm": null, | |
| "learning_rate": 7.734439834024897e-05, | |
| "loss": 0.0, | |
| "step": 233 | |
| }, | |
| { | |
| "epoch": 0.4854771784232365, | |
| "grad_norm": null, | |
| "learning_rate": 7.767634854771785e-05, | |
| "loss": 0.0, | |
| "step": 234 | |
| }, | |
| { | |
| "epoch": 0.487551867219917, | |
| "grad_norm": null, | |
| "learning_rate": 7.800829875518673e-05, | |
| "loss": 0.0, | |
| "step": 235 | |
| }, | |
| { | |
| "epoch": 0.4896265560165975, | |
| "grad_norm": null, | |
| "learning_rate": 7.834024896265561e-05, | |
| "loss": 0.0, | |
| "step": 236 | |
| }, | |
| { | |
| "epoch": 0.491701244813278, | |
| "grad_norm": null, | |
| "learning_rate": 7.867219917012448e-05, | |
| "loss": 0.0, | |
| "step": 237 | |
| }, | |
| { | |
| "epoch": 0.49377593360995853, | |
| "grad_norm": null, | |
| "learning_rate": 7.900414937759337e-05, | |
| "loss": 0.0, | |
| "step": 238 | |
| }, | |
| { | |
| "epoch": 0.495850622406639, | |
| "grad_norm": null, | |
| "learning_rate": 7.933609958506224e-05, | |
| "loss": 0.0, | |
| "step": 239 | |
| }, | |
| { | |
| "epoch": 0.4979253112033195, | |
| "grad_norm": null, | |
| "learning_rate": 7.966804979253112e-05, | |
| "loss": 0.0, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.5, | |
| "grad_norm": null, | |
| "learning_rate": 8e-05, | |
| "loss": 0.0, | |
| "step": 241 | |
| }, | |
| { | |
| "epoch": 0.5020746887966805, | |
| "grad_norm": null, | |
| "learning_rate": 7.999995804240747e-05, | |
| "loss": 0.0, | |
| "step": 242 | |
| }, | |
| { | |
| "epoch": 0.504149377593361, | |
| "grad_norm": null, | |
| "learning_rate": 7.999983216971788e-05, | |
| "loss": 0.0, | |
| "step": 243 | |
| }, | |
| { | |
| "epoch": 0.5062240663900415, | |
| "grad_norm": null, | |
| "learning_rate": 7.999962238219528e-05, | |
| "loss": 0.0, | |
| "step": 244 | |
| }, | |
| { | |
| "epoch": 0.508298755186722, | |
| "grad_norm": null, | |
| "learning_rate": 7.999932868027982e-05, | |
| "loss": 0.0, | |
| "step": 245 | |
| }, | |
| { | |
| "epoch": 0.5103734439834025, | |
| "grad_norm": null, | |
| "learning_rate": 7.999895106458763e-05, | |
| "loss": 0.0, | |
| "step": 246 | |
| }, | |
| { | |
| "epoch": 0.5124481327800829, | |
| "grad_norm": null, | |
| "learning_rate": 7.99984895359109e-05, | |
| "loss": 0.0, | |
| "step": 247 | |
| }, | |
| { | |
| "epoch": 0.5145228215767634, | |
| "grad_norm": null, | |
| "learning_rate": 7.999794409521784e-05, | |
| "loss": 0.0, | |
| "step": 248 | |
| }, | |
| { | |
| "epoch": 0.516597510373444, | |
| "grad_norm": null, | |
| "learning_rate": 7.999731474365277e-05, | |
| "loss": 0.0, | |
| "step": 249 | |
| }, | |
| { | |
| "epoch": 0.5186721991701245, | |
| "grad_norm": null, | |
| "learning_rate": 7.999660148253595e-05, | |
| "loss": 0.0, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.520746887966805, | |
| "grad_norm": null, | |
| "learning_rate": 7.999580431336375e-05, | |
| "loss": 0.0, | |
| "step": 251 | |
| }, | |
| { | |
| "epoch": 0.5228215767634855, | |
| "grad_norm": null, | |
| "learning_rate": 7.99949232378085e-05, | |
| "loss": 0.0, | |
| "step": 252 | |
| }, | |
| { | |
| "epoch": 0.524896265560166, | |
| "grad_norm": null, | |
| "learning_rate": 7.999395825771862e-05, | |
| "loss": 0.0, | |
| "step": 253 | |
| }, | |
| { | |
| "epoch": 0.5269709543568465, | |
| "grad_norm": null, | |
| "learning_rate": 7.99929093751185e-05, | |
| "loss": 0.0, | |
| "step": 254 | |
| }, | |
| { | |
| "epoch": 0.529045643153527, | |
| "grad_norm": null, | |
| "learning_rate": 7.999177659220859e-05, | |
| "loss": 0.0, | |
| "step": 255 | |
| }, | |
| { | |
| "epoch": 0.5311203319502075, | |
| "grad_norm": null, | |
| "learning_rate": 7.999055991136532e-05, | |
| "loss": 0.0, | |
| "step": 256 | |
| }, | |
| { | |
| "epoch": 0.533195020746888, | |
| "grad_norm": null, | |
| "learning_rate": 7.998925933514114e-05, | |
| "loss": 0.0, | |
| "step": 257 | |
| }, | |
| { | |
| "epoch": 0.5352697095435685, | |
| "grad_norm": null, | |
| "learning_rate": 7.998787486626451e-05, | |
| "loss": 0.0, | |
| "step": 258 | |
| }, | |
| { | |
| "epoch": 0.5373443983402489, | |
| "grad_norm": null, | |
| "learning_rate": 7.998640650763985e-05, | |
| "loss": 0.0, | |
| "step": 259 | |
| }, | |
| { | |
| "epoch": 0.5394190871369294, | |
| "grad_norm": null, | |
| "learning_rate": 7.998485426234764e-05, | |
| "loss": 0.0, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.5414937759336099, | |
| "grad_norm": null, | |
| "learning_rate": 7.998321813364428e-05, | |
| "loss": 0.0, | |
| "step": 261 | |
| }, | |
| { | |
| "epoch": 0.5435684647302904, | |
| "grad_norm": null, | |
| "learning_rate": 7.99814981249622e-05, | |
| "loss": 0.0, | |
| "step": 262 | |
| }, | |
| { | |
| "epoch": 0.5456431535269709, | |
| "grad_norm": null, | |
| "learning_rate": 7.997969423990973e-05, | |
| "loss": 0.0, | |
| "step": 263 | |
| }, | |
| { | |
| "epoch": 0.5477178423236515, | |
| "grad_norm": null, | |
| "learning_rate": 7.997780648227121e-05, | |
| "loss": 0.0, | |
| "step": 264 | |
| }, | |
| { | |
| "epoch": 0.549792531120332, | |
| "grad_norm": null, | |
| "learning_rate": 7.997583485600695e-05, | |
| "loss": 0.0, | |
| "step": 265 | |
| }, | |
| { | |
| "epoch": 0.5518672199170125, | |
| "grad_norm": null, | |
| "learning_rate": 7.997377936525319e-05, | |
| "loss": 0.0, | |
| "step": 266 | |
| }, | |
| { | |
| "epoch": 0.553941908713693, | |
| "grad_norm": null, | |
| "learning_rate": 7.997164001432208e-05, | |
| "loss": 0.0, | |
| "step": 267 | |
| }, | |
| { | |
| "epoch": 0.5560165975103735, | |
| "grad_norm": null, | |
| "learning_rate": 7.996941680770171e-05, | |
| "loss": 0.0, | |
| "step": 268 | |
| }, | |
| { | |
| "epoch": 0.558091286307054, | |
| "grad_norm": null, | |
| "learning_rate": 7.996710975005613e-05, | |
| "loss": 0.0, | |
| "step": 269 | |
| }, | |
| { | |
| "epoch": 0.5601659751037344, | |
| "grad_norm": null, | |
| "learning_rate": 7.996471884622527e-05, | |
| "loss": 0.0, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.5622406639004149, | |
| "grad_norm": null, | |
| "learning_rate": 7.996224410122493e-05, | |
| "loss": 0.0, | |
| "step": 271 | |
| }, | |
| { | |
| "epoch": 0.5643153526970954, | |
| "grad_norm": null, | |
| "learning_rate": 7.995968552024685e-05, | |
| "loss": 0.0, | |
| "step": 272 | |
| }, | |
| { | |
| "epoch": 0.5663900414937759, | |
| "grad_norm": null, | |
| "learning_rate": 7.995704310865862e-05, | |
| "loss": 0.0, | |
| "step": 273 | |
| }, | |
| { | |
| "epoch": 0.5684647302904564, | |
| "grad_norm": null, | |
| "learning_rate": 7.995431687200368e-05, | |
| "loss": 0.0, | |
| "step": 274 | |
| }, | |
| { | |
| "epoch": 0.5705394190871369, | |
| "grad_norm": null, | |
| "learning_rate": 7.995150681600139e-05, | |
| "loss": 0.0, | |
| "step": 275 | |
| }, | |
| { | |
| "epoch": 0.5726141078838174, | |
| "grad_norm": null, | |
| "learning_rate": 7.994861294654687e-05, | |
| "loss": 0.0, | |
| "step": 276 | |
| }, | |
| { | |
| "epoch": 0.5746887966804979, | |
| "grad_norm": null, | |
| "learning_rate": 7.994563526971112e-05, | |
| "loss": 0.0, | |
| "step": 277 | |
| }, | |
| { | |
| "epoch": 0.5767634854771784, | |
| "grad_norm": null, | |
| "learning_rate": 7.994257379174097e-05, | |
| "loss": 0.0, | |
| "step": 278 | |
| }, | |
| { | |
| "epoch": 0.578838174273859, | |
| "grad_norm": null, | |
| "learning_rate": 7.9939428519059e-05, | |
| "loss": 0.0, | |
| "step": 279 | |
| }, | |
| { | |
| "epoch": 0.5809128630705395, | |
| "grad_norm": null, | |
| "learning_rate": 7.993619945826364e-05, | |
| "loss": 0.0, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.58298755186722, | |
| "grad_norm": null, | |
| "learning_rate": 7.993288661612906e-05, | |
| "loss": 0.0, | |
| "step": 281 | |
| }, | |
| { | |
| "epoch": 0.5850622406639004, | |
| "grad_norm": null, | |
| "learning_rate": 7.992948999960518e-05, | |
| "loss": 0.0, | |
| "step": 282 | |
| }, | |
| { | |
| "epoch": 0.5871369294605809, | |
| "grad_norm": null, | |
| "learning_rate": 7.992600961581774e-05, | |
| "loss": 0.0, | |
| "step": 283 | |
| }, | |
| { | |
| "epoch": 0.5892116182572614, | |
| "grad_norm": null, | |
| "learning_rate": 7.992244547206813e-05, | |
| "loss": 0.0, | |
| "step": 284 | |
| }, | |
| { | |
| "epoch": 0.5912863070539419, | |
| "grad_norm": null, | |
| "learning_rate": 7.99187975758335e-05, | |
| "loss": 0.0, | |
| "step": 285 | |
| }, | |
| { | |
| "epoch": 0.5933609958506224, | |
| "grad_norm": null, | |
| "learning_rate": 7.991506593476672e-05, | |
| "loss": 0.0, | |
| "step": 286 | |
| }, | |
| { | |
| "epoch": 0.5954356846473029, | |
| "grad_norm": null, | |
| "learning_rate": 7.99112505566963e-05, | |
| "loss": 0.0, | |
| "step": 287 | |
| }, | |
| { | |
| "epoch": 0.5975103734439834, | |
| "grad_norm": null, | |
| "learning_rate": 7.990735144962645e-05, | |
| "loss": 0.0, | |
| "step": 288 | |
| }, | |
| { | |
| "epoch": 0.5995850622406639, | |
| "grad_norm": null, | |
| "learning_rate": 7.990336862173701e-05, | |
| "loss": 0.0, | |
| "step": 289 | |
| }, | |
| { | |
| "epoch": 0.6016597510373444, | |
| "grad_norm": null, | |
| "learning_rate": 7.989930208138351e-05, | |
| "loss": 0.0, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.6037344398340249, | |
| "grad_norm": null, | |
| "learning_rate": 7.989515183709705e-05, | |
| "loss": 0.0, | |
| "step": 291 | |
| }, | |
| { | |
| "epoch": 0.6058091286307054, | |
| "grad_norm": null, | |
| "learning_rate": 7.989091789758433e-05, | |
| "loss": 0.0, | |
| "step": 292 | |
| }, | |
| { | |
| "epoch": 0.6078838174273858, | |
| "grad_norm": null, | |
| "learning_rate": 7.988660027172764e-05, | |
| "loss": 0.0, | |
| "step": 293 | |
| }, | |
| { | |
| "epoch": 0.6099585062240664, | |
| "grad_norm": null, | |
| "learning_rate": 7.988219896858487e-05, | |
| "loss": 0.0, | |
| "step": 294 | |
| }, | |
| { | |
| "epoch": 0.6120331950207469, | |
| "grad_norm": null, | |
| "learning_rate": 7.98777139973894e-05, | |
| "loss": 0.0, | |
| "step": 295 | |
| }, | |
| { | |
| "epoch": 0.6141078838174274, | |
| "grad_norm": null, | |
| "learning_rate": 7.987314536755016e-05, | |
| "loss": 0.0, | |
| "step": 296 | |
| }, | |
| { | |
| "epoch": 0.6161825726141079, | |
| "grad_norm": null, | |
| "learning_rate": 7.986849308865159e-05, | |
| "loss": 0.0, | |
| "step": 297 | |
| }, | |
| { | |
| "epoch": 0.6182572614107884, | |
| "grad_norm": null, | |
| "learning_rate": 7.986375717045363e-05, | |
| "loss": 0.0, | |
| "step": 298 | |
| }, | |
| { | |
| "epoch": 0.6203319502074689, | |
| "grad_norm": null, | |
| "learning_rate": 7.985893762289163e-05, | |
| "loss": 0.0, | |
| "step": 299 | |
| }, | |
| { | |
| "epoch": 0.6224066390041494, | |
| "grad_norm": null, | |
| "learning_rate": 7.985403445607645e-05, | |
| "loss": 0.0, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.6244813278008299, | |
| "grad_norm": null, | |
| "learning_rate": 7.984904768029432e-05, | |
| "loss": 0.0, | |
| "step": 301 | |
| }, | |
| { | |
| "epoch": 0.6265560165975104, | |
| "grad_norm": null, | |
| "learning_rate": 7.984397730600692e-05, | |
| "loss": 0.0, | |
| "step": 302 | |
| }, | |
| { | |
| "epoch": 0.6286307053941909, | |
| "grad_norm": null, | |
| "learning_rate": 7.983882334385126e-05, | |
| "loss": 0.0, | |
| "step": 303 | |
| }, | |
| { | |
| "epoch": 0.6307053941908713, | |
| "grad_norm": null, | |
| "learning_rate": 7.983358580463975e-05, | |
| "loss": 0.0, | |
| "step": 304 | |
| }, | |
| { | |
| "epoch": 0.6327800829875518, | |
| "grad_norm": null, | |
| "learning_rate": 7.982826469936012e-05, | |
| "loss": 0.0, | |
| "step": 305 | |
| }, | |
| { | |
| "epoch": 0.6348547717842323, | |
| "grad_norm": null, | |
| "learning_rate": 7.98228600391754e-05, | |
| "loss": 0.0, | |
| "step": 306 | |
| }, | |
| { | |
| "epoch": 0.6369294605809128, | |
| "grad_norm": null, | |
| "learning_rate": 7.981737183542391e-05, | |
| "loss": 0.0, | |
| "step": 307 | |
| }, | |
| { | |
| "epoch": 0.6390041493775933, | |
| "grad_norm": null, | |
| "learning_rate": 7.981180009961926e-05, | |
| "loss": 0.0, | |
| "step": 308 | |
| }, | |
| { | |
| "epoch": 0.6410788381742739, | |
| "grad_norm": null, | |
| "learning_rate": 7.980614484345025e-05, | |
| "loss": 0.0, | |
| "step": 309 | |
| }, | |
| { | |
| "epoch": 0.6431535269709544, | |
| "grad_norm": null, | |
| "learning_rate": 7.980040607878095e-05, | |
| "loss": 0.0, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.6452282157676349, | |
| "grad_norm": null, | |
| "learning_rate": 7.97945838176506e-05, | |
| "loss": 0.0, | |
| "step": 311 | |
| }, | |
| { | |
| "epoch": 0.6473029045643154, | |
| "grad_norm": null, | |
| "learning_rate": 7.97886780722736e-05, | |
| "loss": 0.0, | |
| "step": 312 | |
| }, | |
| { | |
| "epoch": 0.6493775933609959, | |
| "grad_norm": null, | |
| "learning_rate": 7.978268885503949e-05, | |
| "loss": 0.0, | |
| "step": 313 | |
| }, | |
| { | |
| "epoch": 0.6514522821576764, | |
| "grad_norm": null, | |
| "learning_rate": 7.977661617851291e-05, | |
| "loss": 0.0, | |
| "step": 314 | |
| }, | |
| { | |
| "epoch": 0.6535269709543569, | |
| "grad_norm": null, | |
| "learning_rate": 7.977046005543361e-05, | |
| "loss": 0.0, | |
| "step": 315 | |
| }, | |
| { | |
| "epoch": 0.6556016597510373, | |
| "grad_norm": null, | |
| "learning_rate": 7.976422049871643e-05, | |
| "loss": 0.0, | |
| "step": 316 | |
| }, | |
| { | |
| "epoch": 0.6576763485477178, | |
| "grad_norm": null, | |
| "learning_rate": 7.975789752145118e-05, | |
| "loss": 0.0, | |
| "step": 317 | |
| }, | |
| { | |
| "epoch": 0.6597510373443983, | |
| "grad_norm": null, | |
| "learning_rate": 7.97514911369027e-05, | |
| "loss": 0.0, | |
| "step": 318 | |
| }, | |
| { | |
| "epoch": 0.6618257261410788, | |
| "grad_norm": null, | |
| "learning_rate": 7.974500135851084e-05, | |
| "loss": 0.0, | |
| "step": 319 | |
| }, | |
| { | |
| "epoch": 0.6639004149377593, | |
| "grad_norm": null, | |
| "learning_rate": 7.973842819989036e-05, | |
| "loss": 0.0, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.6659751037344398, | |
| "grad_norm": null, | |
| "learning_rate": 7.973177167483093e-05, | |
| "loss": 0.0, | |
| "step": 321 | |
| }, | |
| { | |
| "epoch": 0.6680497925311203, | |
| "grad_norm": null, | |
| "learning_rate": 7.972503179729716e-05, | |
| "loss": 0.0, | |
| "step": 322 | |
| }, | |
| { | |
| "epoch": 0.6701244813278008, | |
| "grad_norm": null, | |
| "learning_rate": 7.971820858142851e-05, | |
| "loss": 0.0, | |
| "step": 323 | |
| }, | |
| { | |
| "epoch": 0.6721991701244814, | |
| "grad_norm": null, | |
| "learning_rate": 7.971130204153926e-05, | |
| "loss": 0.0, | |
| "step": 324 | |
| }, | |
| { | |
| "epoch": 0.6742738589211619, | |
| "grad_norm": null, | |
| "learning_rate": 7.970431219211849e-05, | |
| "loss": 0.0, | |
| "step": 325 | |
| }, | |
| { | |
| "epoch": 0.6763485477178424, | |
| "grad_norm": null, | |
| "learning_rate": 7.969723904783008e-05, | |
| "loss": 0.0, | |
| "step": 326 | |
| }, | |
| { | |
| "epoch": 0.6784232365145229, | |
| "grad_norm": null, | |
| "learning_rate": 7.969008262351262e-05, | |
| "loss": 0.0, | |
| "step": 327 | |
| }, | |
| { | |
| "epoch": 0.6804979253112033, | |
| "grad_norm": null, | |
| "learning_rate": 7.968284293417943e-05, | |
| "loss": 0.0, | |
| "step": 328 | |
| }, | |
| { | |
| "epoch": 0.6825726141078838, | |
| "grad_norm": null, | |
| "learning_rate": 7.967551999501853e-05, | |
| "loss": 0.0, | |
| "step": 329 | |
| }, | |
| { | |
| "epoch": 0.6846473029045643, | |
| "grad_norm": null, | |
| "learning_rate": 7.966811382139252e-05, | |
| "loss": 0.0, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.6867219917012448, | |
| "grad_norm": null, | |
| "learning_rate": 7.96606244288387e-05, | |
| "loss": 0.0, | |
| "step": 331 | |
| }, | |
| { | |
| "epoch": 0.6887966804979253, | |
| "grad_norm": null, | |
| "learning_rate": 7.965305183306889e-05, | |
| "loss": 0.0, | |
| "step": 332 | |
| }, | |
| { | |
| "epoch": 0.6908713692946058, | |
| "grad_norm": null, | |
| "learning_rate": 7.964539604996949e-05, | |
| "loss": 0.0, | |
| "step": 333 | |
| }, | |
| { | |
| "epoch": 0.6929460580912863, | |
| "grad_norm": null, | |
| "learning_rate": 7.963765709560141e-05, | |
| "loss": 0.0, | |
| "step": 334 | |
| }, | |
| { | |
| "epoch": 0.6950207468879668, | |
| "grad_norm": null, | |
| "learning_rate": 7.962983498620006e-05, | |
| "loss": 0.0, | |
| "step": 335 | |
| }, | |
| { | |
| "epoch": 0.6970954356846473, | |
| "grad_norm": null, | |
| "learning_rate": 7.962192973817527e-05, | |
| "loss": 0.0, | |
| "step": 336 | |
| }, | |
| { | |
| "epoch": 0.6991701244813278, | |
| "grad_norm": null, | |
| "learning_rate": 7.961394136811131e-05, | |
| "loss": 0.0, | |
| "step": 337 | |
| }, | |
| { | |
| "epoch": 0.7012448132780082, | |
| "grad_norm": null, | |
| "learning_rate": 7.96058698927668e-05, | |
| "loss": 0.0, | |
| "step": 338 | |
| }, | |
| { | |
| "epoch": 0.7033195020746889, | |
| "grad_norm": null, | |
| "learning_rate": 7.959771532907472e-05, | |
| "loss": 0.0, | |
| "step": 339 | |
| }, | |
| { | |
| "epoch": 0.7053941908713693, | |
| "grad_norm": null, | |
| "learning_rate": 7.95894776941424e-05, | |
| "loss": 0.0, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.7074688796680498, | |
| "grad_norm": null, | |
| "learning_rate": 7.958115700525138e-05, | |
| "loss": 0.0, | |
| "step": 341 | |
| }, | |
| { | |
| "epoch": 0.7095435684647303, | |
| "grad_norm": null, | |
| "learning_rate": 7.957275327985747e-05, | |
| "loss": 0.0, | |
| "step": 342 | |
| }, | |
| { | |
| "epoch": 0.7116182572614108, | |
| "grad_norm": null, | |
| "learning_rate": 7.956426653559066e-05, | |
| "loss": 0.0, | |
| "step": 343 | |
| }, | |
| { | |
| "epoch": 0.7136929460580913, | |
| "grad_norm": null, | |
| "learning_rate": 7.955569679025514e-05, | |
| "loss": 0.0, | |
| "step": 344 | |
| }, | |
| { | |
| "epoch": 0.7157676348547718, | |
| "grad_norm": null, | |
| "learning_rate": 7.954704406182918e-05, | |
| "loss": 0.0, | |
| "step": 345 | |
| }, | |
| { | |
| "epoch": 0.7178423236514523, | |
| "grad_norm": null, | |
| "learning_rate": 7.953830836846519e-05, | |
| "loss": 0.0, | |
| "step": 346 | |
| }, | |
| { | |
| "epoch": 0.7199170124481328, | |
| "grad_norm": null, | |
| "learning_rate": 7.952948972848959e-05, | |
| "loss": 0.0, | |
| "step": 347 | |
| }, | |
| { | |
| "epoch": 0.7219917012448133, | |
| "grad_norm": null, | |
| "learning_rate": 7.952058816040281e-05, | |
| "loss": 0.0, | |
| "step": 348 | |
| }, | |
| { | |
| "epoch": 0.7240663900414938, | |
| "grad_norm": null, | |
| "learning_rate": 7.95116036828793e-05, | |
| "loss": 0.0, | |
| "step": 349 | |
| }, | |
| { | |
| "epoch": 0.7261410788381742, | |
| "grad_norm": null, | |
| "learning_rate": 7.950253631476739e-05, | |
| "loss": 0.0, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.7282157676348547, | |
| "grad_norm": null, | |
| "learning_rate": 7.949338607508933e-05, | |
| "loss": 0.0, | |
| "step": 351 | |
| }, | |
| { | |
| "epoch": 0.7302904564315352, | |
| "grad_norm": null, | |
| "learning_rate": 7.948415298304122e-05, | |
| "loss": 0.0, | |
| "step": 352 | |
| }, | |
| { | |
| "epoch": 0.7323651452282157, | |
| "grad_norm": null, | |
| "learning_rate": 7.947483705799299e-05, | |
| "loss": 0.0, | |
| "step": 353 | |
| }, | |
| { | |
| "epoch": 0.7344398340248963, | |
| "grad_norm": null, | |
| "learning_rate": 7.946543831948832e-05, | |
| "loss": 0.0, | |
| "step": 354 | |
| }, | |
| { | |
| "epoch": 0.7365145228215768, | |
| "grad_norm": null, | |
| "learning_rate": 7.945595678724461e-05, | |
| "loss": 0.0, | |
| "step": 355 | |
| }, | |
| { | |
| "epoch": 0.7385892116182573, | |
| "grad_norm": null, | |
| "learning_rate": 7.944639248115302e-05, | |
| "loss": 0.0, | |
| "step": 356 | |
| }, | |
| { | |
| "epoch": 0.7406639004149378, | |
| "grad_norm": null, | |
| "learning_rate": 7.943674542127827e-05, | |
| "loss": 0.0, | |
| "step": 357 | |
| }, | |
| { | |
| "epoch": 0.7427385892116183, | |
| "grad_norm": null, | |
| "learning_rate": 7.942701562785875e-05, | |
| "loss": 0.0, | |
| "step": 358 | |
| }, | |
| { | |
| "epoch": 0.7448132780082988, | |
| "grad_norm": null, | |
| "learning_rate": 7.94172031213064e-05, | |
| "loss": 0.0, | |
| "step": 359 | |
| }, | |
| { | |
| "epoch": 0.7468879668049793, | |
| "grad_norm": null, | |
| "learning_rate": 7.940730792220667e-05, | |
| "loss": 0.0, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.7489626556016598, | |
| "grad_norm": null, | |
| "learning_rate": 7.93973300513185e-05, | |
| "loss": 0.0, | |
| "step": 361 | |
| }, | |
| { | |
| "epoch": 0.7510373443983402, | |
| "grad_norm": null, | |
| "learning_rate": 7.938726952957424e-05, | |
| "loss": 0.0, | |
| "step": 362 | |
| }, | |
| { | |
| "epoch": 0.7531120331950207, | |
| "grad_norm": null, | |
| "learning_rate": 7.937712637807968e-05, | |
| "loss": 0.0, | |
| "step": 363 | |
| }, | |
| { | |
| "epoch": 0.7551867219917012, | |
| "grad_norm": null, | |
| "learning_rate": 7.936690061811394e-05, | |
| "loss": 0.0, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 0.7572614107883817, | |
| "grad_norm": null, | |
| "learning_rate": 7.93565922711294e-05, | |
| "loss": 0.0, | |
| "step": 365 | |
| }, | |
| { | |
| "epoch": 0.7593360995850622, | |
| "grad_norm": null, | |
| "learning_rate": 7.934620135875176e-05, | |
| "loss": 0.0, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 0.7614107883817427, | |
| "grad_norm": null, | |
| "learning_rate": 7.933572790277988e-05, | |
| "loss": 0.0, | |
| "step": 367 | |
| }, | |
| { | |
| "epoch": 0.7634854771784232, | |
| "grad_norm": null, | |
| "learning_rate": 7.932517192518582e-05, | |
| "loss": 0.0, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 0.7655601659751037, | |
| "grad_norm": null, | |
| "learning_rate": 7.931453344811476e-05, | |
| "loss": 0.0, | |
| "step": 369 | |
| }, | |
| { | |
| "epoch": 0.7676348547717843, | |
| "grad_norm": null, | |
| "learning_rate": 7.930381249388494e-05, | |
| "loss": 0.0, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.7697095435684648, | |
| "grad_norm": null, | |
| "learning_rate": 7.929300908498763e-05, | |
| "loss": 0.0, | |
| "step": 371 | |
| }, | |
| { | |
| "epoch": 0.7717842323651453, | |
| "grad_norm": null, | |
| "learning_rate": 7.928212324408706e-05, | |
| "loss": 0.0, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 0.7738589211618258, | |
| "grad_norm": null, | |
| "learning_rate": 7.927115499402045e-05, | |
| "loss": 0.0, | |
| "step": 373 | |
| }, | |
| { | |
| "epoch": 0.7759336099585062, | |
| "grad_norm": null, | |
| "learning_rate": 7.926010435779784e-05, | |
| "loss": 0.0, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 0.7780082987551867, | |
| "grad_norm": null, | |
| "learning_rate": 7.924897135860215e-05, | |
| "loss": 0.0, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 0.7800829875518672, | |
| "grad_norm": null, | |
| "learning_rate": 7.923775601978908e-05, | |
| "loss": 0.0, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 0.7821576763485477, | |
| "grad_norm": null, | |
| "learning_rate": 7.922645836488702e-05, | |
| "loss": 0.0, | |
| "step": 377 | |
| }, | |
| { | |
| "epoch": 0.7842323651452282, | |
| "grad_norm": null, | |
| "learning_rate": 7.921507841759714e-05, | |
| "loss": 0.0, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 0.7863070539419087, | |
| "grad_norm": null, | |
| "learning_rate": 7.920361620179318e-05, | |
| "loss": 0.0, | |
| "step": 379 | |
| }, | |
| { | |
| "epoch": 0.7883817427385892, | |
| "grad_norm": null, | |
| "learning_rate": 7.919207174152148e-05, | |
| "loss": 0.0, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.7904564315352697, | |
| "grad_norm": null, | |
| "learning_rate": 7.918044506100094e-05, | |
| "loss": 0.0, | |
| "step": 381 | |
| }, | |
| { | |
| "epoch": 0.7925311203319502, | |
| "grad_norm": null, | |
| "learning_rate": 7.916873618462293e-05, | |
| "loss": 0.0, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 0.7946058091286307, | |
| "grad_norm": null, | |
| "learning_rate": 7.915694513695126e-05, | |
| "loss": 0.0, | |
| "step": 383 | |
| }, | |
| { | |
| "epoch": 0.7966804979253111, | |
| "grad_norm": null, | |
| "learning_rate": 7.914507194272214e-05, | |
| "loss": 0.0, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 0.7987551867219918, | |
| "grad_norm": null, | |
| "learning_rate": 7.913311662684409e-05, | |
| "loss": 0.0, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 0.8008298755186722, | |
| "grad_norm": null, | |
| "learning_rate": 7.912107921439793e-05, | |
| "loss": 0.0, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 0.8029045643153527, | |
| "grad_norm": null, | |
| "learning_rate": 7.910895973063671e-05, | |
| "loss": 0.0, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 0.8049792531120332, | |
| "grad_norm": null, | |
| "learning_rate": 7.909675820098563e-05, | |
| "loss": 0.0, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 0.8070539419087137, | |
| "grad_norm": null, | |
| "learning_rate": 7.908447465104204e-05, | |
| "loss": 0.0, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 0.8091286307053942, | |
| "grad_norm": null, | |
| "learning_rate": 7.907210910657536e-05, | |
| "loss": 0.0, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.8112033195020747, | |
| "grad_norm": null, | |
| "learning_rate": 7.9059661593527e-05, | |
| "loss": 0.0, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 0.8132780082987552, | |
| "grad_norm": null, | |
| "learning_rate": 7.904713213801034e-05, | |
| "loss": 0.0, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 0.8153526970954357, | |
| "grad_norm": null, | |
| "learning_rate": 7.903452076631068e-05, | |
| "loss": 0.0, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 0.8174273858921162, | |
| "grad_norm": null, | |
| "learning_rate": 7.902182750488516e-05, | |
| "loss": 0.0, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 0.8195020746887967, | |
| "grad_norm": null, | |
| "learning_rate": 7.900905238036272e-05, | |
| "loss": 0.0, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.8215767634854771, | |
| "grad_norm": null, | |
| "learning_rate": 7.899619541954399e-05, | |
| "loss": 0.0, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 0.8236514522821576, | |
| "grad_norm": null, | |
| "learning_rate": 7.89832566494014e-05, | |
| "loss": 0.0, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 0.8257261410788381, | |
| "grad_norm": null, | |
| "learning_rate": 7.897023609707887e-05, | |
| "loss": 0.0, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 0.8278008298755186, | |
| "grad_norm": null, | |
| "learning_rate": 7.895713378989199e-05, | |
| "loss": 0.0, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 0.8298755186721992, | |
| "grad_norm": null, | |
| "learning_rate": 7.894394975532779e-05, | |
| "loss": 0.0, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.8319502074688797, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.893068402104482e-05, | |
| "loss": 0.0, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 0.8340248962655602, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.891733661487298e-05, | |
| "loss": 0.0, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 0.8360995850622407, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.890390756481352e-05, | |
| "loss": 0.0, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 0.8381742738589212, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.889039689903896e-05, | |
| "loss": 0.0, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 0.8402489626556017, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.887680464589306e-05, | |
| "loss": 0.0, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.8423236514522822, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.886313083389074e-05, | |
| "loss": 0.0, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 0.8443983402489627, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.8849375491718e-05, | |
| "loss": 0.0, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 0.8464730290456431, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.88355386482319e-05, | |
| "loss": 0.0, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 0.8485477178423236, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.882162033246047e-05, | |
| "loss": 0.0, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 0.8506224066390041, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.880762057360266e-05, | |
| "loss": 0.0, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.8526970954356846, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.879353940102827e-05, | |
| "loss": 0.0, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 0.8547717842323651, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.877937684427791e-05, | |
| "loss": 0.0, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 0.8568464730290456, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.876513293306292e-05, | |
| "loss": 0.0, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 0.8589211618257261, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.875080769726534e-05, | |
| "loss": 0.0, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 0.8609958506224067, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.873640116693775e-05, | |
| "loss": 0.0, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.8630705394190872, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.872191337230332e-05, | |
| "loss": 0.0, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.8651452282157677, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.870734434375573e-05, | |
| "loss": 0.0, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 0.8672199170124482, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.869269411185901e-05, | |
| "loss": 0.0, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 0.8692946058091287, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.86779627073476e-05, | |
| "loss": 0.0, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 0.8713692946058091, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.866315016112622e-05, | |
| "loss": 0.0, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.8734439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.86482565042698e-05, | |
| "loss": 0.0, | |
| "step": 421 | |
| }, | |
| { | |
| "epoch": 0.8755186721991701, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.863328176802345e-05, | |
| "loss": 0.0, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 0.8775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.861822598380236e-05, | |
| "loss": 0.0, | |
| "step": 423 | |
| }, | |
| { | |
| "epoch": 0.8796680497925311, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.860308918319174e-05, | |
| "loss": 0.0, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 0.8817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.858787139794679e-05, | |
| "loss": 0.0, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.8838174273858921, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.857257265999258e-05, | |
| "loss": 0.0, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 0.8858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.855719300142403e-05, | |
| "loss": 0.0, | |
| "step": 427 | |
| }, | |
| { | |
| "epoch": 0.8879668049792531, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.854173245450581e-05, | |
| "loss": 0.0, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 0.8900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.852619105167229e-05, | |
| "loss": 0.0, | |
| "step": 429 | |
| }, | |
| { | |
| "epoch": 0.8921161825726142, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.851056882552746e-05, | |
| "loss": 0.0, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.8941908713692946, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.849486580884486e-05, | |
| "loss": 0.0, | |
| "step": 431 | |
| }, | |
| { | |
| "epoch": 0.8962655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.847908203456755e-05, | |
| "loss": 0.0, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 0.8983402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.846321753580797e-05, | |
| "loss": 0.0, | |
| "step": 433 | |
| }, | |
| { | |
| "epoch": 0.9004149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.844727234584794e-05, | |
| "loss": 0.0, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 0.9024896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.843124649813853e-05, | |
| "loss": 0.0, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 0.9045643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.841514002630006e-05, | |
| "loss": 0.0, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 0.9066390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.839895296412197e-05, | |
| "loss": 0.0, | |
| "step": 437 | |
| }, | |
| { | |
| "epoch": 0.9087136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.838268534556275e-05, | |
| "loss": 0.0, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 0.9107883817427386, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.836633720474991e-05, | |
| "loss": 0.0, | |
| "step": 439 | |
| }, | |
| { | |
| "epoch": 0.9128630705394191, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.83499085759799e-05, | |
| "loss": 0.0, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.9149377593360996, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.833339949371798e-05, | |
| "loss": 0.0, | |
| "step": 441 | |
| }, | |
| { | |
| "epoch": 0.91701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.831680999259825e-05, | |
| "loss": 0.0, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 0.9190871369294605, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.830014010742344e-05, | |
| "loss": 0.0, | |
| "step": 443 | |
| }, | |
| { | |
| "epoch": 0.921161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.828338987316501e-05, | |
| "loss": 0.0, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 0.9232365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.826655932496292e-05, | |
| "loss": 0.0, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 0.9253112033195021, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.824964849812562e-05, | |
| "loss": 0.0, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 0.9273858921161826, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.823265742813e-05, | |
| "loss": 0.0, | |
| "step": 447 | |
| }, | |
| { | |
| "epoch": 0.9294605809128631, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.821558615062127e-05, | |
| "loss": 0.0, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 0.9315352697095436, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.819843470141293e-05, | |
| "loss": 0.0, | |
| "step": 449 | |
| }, | |
| { | |
| "epoch": 0.9336099585062241, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.818120311648663e-05, | |
| "loss": 0.0, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.9356846473029046, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.81638914319922e-05, | |
| "loss": 0.0, | |
| "step": 451 | |
| }, | |
| { | |
| "epoch": 0.9377593360995851, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.814649968424745e-05, | |
| "loss": 0.0, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 0.9398340248962656, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.812902790973816e-05, | |
| "loss": 0.0, | |
| "step": 453 | |
| }, | |
| { | |
| "epoch": 0.941908713692946, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.811147614511803e-05, | |
| "loss": 0.0, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 0.9439834024896265, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.809384442720854e-05, | |
| "loss": 0.0, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 0.946058091286307, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.807613279299892e-05, | |
| "loss": 0.0, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 0.9481327800829875, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.805834127964604e-05, | |
| "loss": 0.0, | |
| "step": 457 | |
| }, | |
| { | |
| "epoch": 0.950207468879668, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.804046992447435e-05, | |
| "loss": 0.0, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 0.9522821576763485, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.80225187649758e-05, | |
| "loss": 0.0, | |
| "step": 459 | |
| }, | |
| { | |
| "epoch": 0.9543568464730291, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.800448783880977e-05, | |
| "loss": 0.0, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.9564315352697096, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.798637718380297e-05, | |
| "loss": 0.0, | |
| "step": 461 | |
| }, | |
| { | |
| "epoch": 0.9585062240663901, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.796818683794938e-05, | |
| "loss": 0.0, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 0.9605809128630706, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.794991683941015e-05, | |
| "loss": 0.0, | |
| "step": 463 | |
| }, | |
| { | |
| "epoch": 0.9626556016597511, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.793156722651353e-05, | |
| "loss": 0.0, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 0.9647302904564315, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.791313803775481e-05, | |
| "loss": 0.0, | |
| "step": 465 | |
| }, | |
| { | |
| "epoch": 0.966804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.789462931179622e-05, | |
| "loss": 0.0, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 0.9688796680497925, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.787604108746681e-05, | |
| "loss": 0.0, | |
| "step": 467 | |
| }, | |
| { | |
| "epoch": 0.970954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.785737340376246e-05, | |
| "loss": 0.0, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 0.9730290456431535, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.783862629984571e-05, | |
| "loss": 0.0, | |
| "step": 469 | |
| }, | |
| { | |
| "epoch": 0.975103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.781979981504574e-05, | |
| "loss": 0.0, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.9771784232365145, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.780089398885825e-05, | |
| "loss": 0.0, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 0.979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.778190886094537e-05, | |
| "loss": 0.0, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 0.9813278008298755, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.776284447113563e-05, | |
| "loss": 0.0, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 0.983402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.77437008594238e-05, | |
| "loss": 0.0, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 0.9854771784232366, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.77244780659709e-05, | |
| "loss": 0.0, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 0.9875518672199171, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.770517613110403e-05, | |
| "loss": 0.0, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 0.9896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.768579509531634e-05, | |
| "loss": 0.0, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 0.991701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.766633499926688e-05, | |
| "loss": 0.0, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 0.9937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.76467958837806e-05, | |
| "loss": 0.0, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 0.995850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.762717778984823e-05, | |
| "loss": 0.0, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.9979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.760748075862614e-05, | |
| "loss": 0.0, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.758770483143634e-05, | |
| "loss": 0.0, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 1.0020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.756785004976636e-05, | |
| "loss": 0.0, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 1.004149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.754791645526913e-05, | |
| "loss": 0.0, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 1.0062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.752790408976294e-05, | |
| "loss": 0.0, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 1.008298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.750781299523132e-05, | |
| "loss": 0.0, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 1.0103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.748764321382296e-05, | |
| "loss": 0.0, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 1.012448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.746739478785163e-05, | |
| "loss": 0.0, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 1.0145228215767634, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.744706775979611e-05, | |
| "loss": 0.0, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 1.016597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.742666217230005e-05, | |
| "loss": 0.0, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 1.0186721991701244, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.74061780681719e-05, | |
| "loss": 0.0, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 1.020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.738561549038487e-05, | |
| "loss": 0.0, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 1.0228215767634854, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.736497448207676e-05, | |
| "loss": 0.0, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 1.0248962655601659, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.734425508654993e-05, | |
| "loss": 0.0, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 1.0269709543568464, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.732345734727116e-05, | |
| "loss": 0.0, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 1.0290456431535269, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.730258130787162e-05, | |
| "loss": 0.0, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 1.0311203319502074, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.72816270121467e-05, | |
| "loss": 0.0, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 1.033195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.726059450405604e-05, | |
| "loss": 0.0, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 1.0352697095435686, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.723948382772327e-05, | |
| "loss": 0.0, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 1.037344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.721829502743607e-05, | |
| "loss": 0.0, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 1.0394190871369295, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.719702814764597e-05, | |
| "loss": 0.0, | |
| "step": 501 | |
| }, | |
| { | |
| "epoch": 1.04149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.717568323296834e-05, | |
| "loss": 0.0, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 1.0435684647302905, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.715426032818223e-05, | |
| "loss": 0.0, | |
| "step": 503 | |
| }, | |
| { | |
| "epoch": 1.045643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.713275947823033e-05, | |
| "loss": 0.0, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 1.0477178423236515, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.711118072821884e-05, | |
| "loss": 0.0, | |
| "step": 505 | |
| }, | |
| { | |
| "epoch": 1.049792531120332, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.708952412341735e-05, | |
| "loss": 0.0, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 1.0518672199170125, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.706778970925883e-05, | |
| "loss": 0.0, | |
| "step": 507 | |
| }, | |
| { | |
| "epoch": 1.053941908713693, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.704597753133946e-05, | |
| "loss": 0.0, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 1.0560165975103735, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.702408763541858e-05, | |
| "loss": 0.0, | |
| "step": 509 | |
| }, | |
| { | |
| "epoch": 1.058091286307054, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.700212006741854e-05, | |
| "loss": 0.0, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 1.0601659751037344, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.698007487342464e-05, | |
| "loss": 0.0, | |
| "step": 511 | |
| }, | |
| { | |
| "epoch": 1.062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.695795209968508e-05, | |
| "loss": 0.0, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 1.0643153526970954, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.693575179261076e-05, | |
| "loss": 0.0, | |
| "step": 513 | |
| }, | |
| { | |
| "epoch": 1.066390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.691347399877522e-05, | |
| "loss": 0.0, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 1.0684647302904564, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.689111876491464e-05, | |
| "loss": 0.0, | |
| "step": 515 | |
| }, | |
| { | |
| "epoch": 1.070539419087137, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.686868613792759e-05, | |
| "loss": 0.0, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 1.0726141078838174, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.684617616487501e-05, | |
| "loss": 0.0, | |
| "step": 517 | |
| }, | |
| { | |
| "epoch": 1.0746887966804979, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.682358889298012e-05, | |
| "loss": 0.0, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 1.0767634854771784, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.680092436962831e-05, | |
| "loss": 0.0, | |
| "step": 519 | |
| }, | |
| { | |
| "epoch": 1.0788381742738589, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.677818264236701e-05, | |
| "loss": 0.0, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 1.0809128630705394, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.675536375890561e-05, | |
| "loss": 0.0, | |
| "step": 521 | |
| }, | |
| { | |
| "epoch": 1.0829875518672198, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.673246776711543e-05, | |
| "loss": 0.0, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 1.0850622406639003, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.670949471502944e-05, | |
| "loss": 0.0, | |
| "step": 523 | |
| }, | |
| { | |
| "epoch": 1.0871369294605808, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.66864446508424e-05, | |
| "loss": 0.0, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 1.0892116182572613, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.666331762291052e-05, | |
| "loss": 0.0, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 1.0912863070539418, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.664011367975156e-05, | |
| "loss": 0.0, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 1.0933609958506225, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.661683287004458e-05, | |
| "loss": 0.0, | |
| "step": 527 | |
| }, | |
| { | |
| "epoch": 1.095435684647303, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.659347524262991e-05, | |
| "loss": 0.0, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 1.0975103734439835, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.657004084650906e-05, | |
| "loss": 0.0, | |
| "step": 529 | |
| }, | |
| { | |
| "epoch": 1.099585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.654652973084455e-05, | |
| "loss": 0.0, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 1.1016597510373445, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.652294194495988e-05, | |
| "loss": 0.0, | |
| "step": 531 | |
| }, | |
| { | |
| "epoch": 1.103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.64992775383394e-05, | |
| "loss": 0.0, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 1.1058091286307055, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.647553656062815e-05, | |
| "loss": 0.0, | |
| "step": 533 | |
| }, | |
| { | |
| "epoch": 1.107883817427386, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.64517190616319e-05, | |
| "loss": 0.0, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 1.1099585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.642782509131685e-05, | |
| "loss": 0.0, | |
| "step": 535 | |
| }, | |
| { | |
| "epoch": 1.112033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.640385469980967e-05, | |
| "loss": 0.0, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 1.1141078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.63798079373974e-05, | |
| "loss": 0.0, | |
| "step": 537 | |
| }, | |
| { | |
| "epoch": 1.116182572614108, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.635568485452721e-05, | |
| "loss": 0.0, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 1.1182572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.633148550180644e-05, | |
| "loss": 0.0, | |
| "step": 539 | |
| }, | |
| { | |
| "epoch": 1.120331950207469, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.630720993000242e-05, | |
| "loss": 0.0, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 1.1224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.628285819004237e-05, | |
| "loss": 0.0, | |
| "step": 541 | |
| }, | |
| { | |
| "epoch": 1.1244813278008299, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.625843033301334e-05, | |
| "loss": 0.0, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 1.1265560165975104, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.623392641016198e-05, | |
| "loss": 0.0, | |
| "step": 543 | |
| }, | |
| { | |
| "epoch": 1.1286307053941909, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.620934647289462e-05, | |
| "loss": 0.0, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 1.1307053941908713, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.618469057277698e-05, | |
| "loss": 0.0, | |
| "step": 545 | |
| }, | |
| { | |
| "epoch": 1.1327800829875518, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.615995876153418e-05, | |
| "loss": 0.0, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 1.1348547717842323, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.613515109105059e-05, | |
| "loss": 0.0, | |
| "step": 547 | |
| }, | |
| { | |
| "epoch": 1.1369294605809128, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.61102676133697e-05, | |
| "loss": 0.0, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 1.1390041493775933, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.608530838069407e-05, | |
| "loss": 0.0, | |
| "step": 549 | |
| }, | |
| { | |
| "epoch": 1.1410788381742738, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.606027344538514e-05, | |
| "loss": 0.0, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 1.1431535269709543, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.603516285996321e-05, | |
| "loss": 0.0, | |
| "step": 551 | |
| }, | |
| { | |
| "epoch": 1.1452282157676348, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.600997667710728e-05, | |
| "loss": 0.0, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 1.1473029045643153, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.59847149496549e-05, | |
| "loss": 0.0, | |
| "step": 553 | |
| }, | |
| { | |
| "epoch": 1.1493775933609958, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.595937773060214e-05, | |
| "loss": 0.0, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 1.1514522821576763, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.593396507310344e-05, | |
| "loss": 0.0, | |
| "step": 555 | |
| }, | |
| { | |
| "epoch": 1.1535269709543567, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.590847703047151e-05, | |
| "loss": 0.0, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 1.1556016597510372, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.588291365617718e-05, | |
| "loss": 0.0, | |
| "step": 557 | |
| }, | |
| { | |
| "epoch": 1.1576763485477177, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.585727500384933e-05, | |
| "loss": 0.0, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 1.1597510373443982, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.583156112727477e-05, | |
| "loss": 0.0, | |
| "step": 559 | |
| }, | |
| { | |
| "epoch": 1.161825726141079, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.580577208039812e-05, | |
| "loss": 0.0, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 1.1639004149377594, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.57799079173217e-05, | |
| "loss": 0.0, | |
| "step": 561 | |
| }, | |
| { | |
| "epoch": 1.16597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.57539686923054e-05, | |
| "loss": 0.0, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 1.1680497925311204, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.572795445976661e-05, | |
| "loss": 0.0, | |
| "step": 563 | |
| }, | |
| { | |
| "epoch": 1.170124481327801, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.570186527428004e-05, | |
| "loss": 0.0, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 1.1721991701244814, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.567570119057767e-05, | |
| "loss": 0.0, | |
| "step": 565 | |
| }, | |
| { | |
| "epoch": 1.1742738589211619, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.564946226354859e-05, | |
| "loss": 0.0, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 1.1763485477178424, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.562314854823891e-05, | |
| "loss": 0.0, | |
| "step": 567 | |
| }, | |
| { | |
| "epoch": 1.1784232365145229, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.559676009985164e-05, | |
| "loss": 0.0, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 1.1804979253112033, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.557029697374658e-05, | |
| "loss": 0.0, | |
| "step": 569 | |
| }, | |
| { | |
| "epoch": 1.1825726141078838, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.554375922544017e-05, | |
| "loss": 0.0, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 1.1846473029045643, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.551714691060543e-05, | |
| "loss": 0.0, | |
| "step": 571 | |
| }, | |
| { | |
| "epoch": 1.1867219917012448, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.549046008507176e-05, | |
| "loss": 0.0, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 1.1887966804979253, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.546369880482494e-05, | |
| "loss": 0.0, | |
| "step": 573 | |
| }, | |
| { | |
| "epoch": 1.1908713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.54368631260069e-05, | |
| "loss": 0.0, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 1.1929460580912863, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.540995310491565e-05, | |
| "loss": 0.0, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 1.1950207468879668, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.53829687980052e-05, | |
| "loss": 0.0, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 1.1970954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.535591026188537e-05, | |
| "loss": 0.0, | |
| "step": 577 | |
| }, | |
| { | |
| "epoch": 1.1991701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.53287775533217e-05, | |
| "loss": 0.0, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 1.2012448132780082, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.530157072923537e-05, | |
| "loss": 0.0, | |
| "step": 579 | |
| }, | |
| { | |
| "epoch": 1.2033195020746887, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.5274289846703e-05, | |
| "loss": 0.0, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 1.2053941908713692, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.52469349629566e-05, | |
| "loss": 0.0, | |
| "step": 581 | |
| }, | |
| { | |
| "epoch": 1.2074688796680497, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.521950613538344e-05, | |
| "loss": 0.0, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 1.2095435684647302, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.519200342152589e-05, | |
| "loss": 0.0, | |
| "step": 583 | |
| }, | |
| { | |
| "epoch": 1.2116182572614107, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.516442687908132e-05, | |
| "loss": 0.0, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 1.2136929460580912, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.5136776565902e-05, | |
| "loss": 0.0, | |
| "step": 585 | |
| }, | |
| { | |
| "epoch": 1.215767634854772, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.510905253999498e-05, | |
| "loss": 0.0, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 1.2178423236514524, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.508125485952191e-05, | |
| "loss": 0.0, | |
| "step": 587 | |
| }, | |
| { | |
| "epoch": 1.2199170124481329, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.505338358279898e-05, | |
| "loss": 0.0, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 1.2219917012448134, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.502543876829677e-05, | |
| "loss": 0.0, | |
| "step": 589 | |
| }, | |
| { | |
| "epoch": 1.2240663900414939, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.499742047464016e-05, | |
| "loss": 0.0, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 1.2261410788381744, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.496932876060813e-05, | |
| "loss": 0.0, | |
| "step": 591 | |
| }, | |
| { | |
| "epoch": 1.2282157676348548, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.494116368513372e-05, | |
| "loss": 0.0, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 1.2302904564315353, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.491292530730387e-05, | |
| "loss": 0.0, | |
| "step": 593 | |
| }, | |
| { | |
| "epoch": 1.2323651452282158, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.488461368635932e-05, | |
| "loss": 0.0, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 1.2344398340248963, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.485622888169442e-05, | |
| "loss": 0.0, | |
| "step": 595 | |
| }, | |
| { | |
| "epoch": 1.2365145228215768, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.482777095285706e-05, | |
| "loss": 0.0, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 1.2385892116182573, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.479923995954858e-05, | |
| "loss": 0.0, | |
| "step": 597 | |
| }, | |
| { | |
| "epoch": 1.2406639004149378, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.477063596162355e-05, | |
| "loss": 0.0, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 1.2427385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.474195901908974e-05, | |
| "loss": 0.0, | |
| "step": 599 | |
| }, | |
| { | |
| "epoch": 1.2448132780082988, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.471320919210786e-05, | |
| "loss": 0.0, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 1.2468879668049793, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.468438654099166e-05, | |
| "loss": 0.0, | |
| "step": 601 | |
| }, | |
| { | |
| "epoch": 1.2489626556016598, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.465549112620756e-05, | |
| "loss": 0.0, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 1.2510373443983402, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.462652300837466e-05, | |
| "loss": 0.0, | |
| "step": 603 | |
| }, | |
| { | |
| "epoch": 1.2531120331950207, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.459748224826459e-05, | |
| "loss": 0.0, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 1.2551867219917012, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.456836890680136e-05, | |
| "loss": 0.0, | |
| "step": 605 | |
| }, | |
| { | |
| "epoch": 1.2572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.453918304506127e-05, | |
| "loss": 0.0, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 1.2593360995850622, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.450992472427274e-05, | |
| "loss": 0.0, | |
| "step": 607 | |
| }, | |
| { | |
| "epoch": 1.2614107883817427, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.44805940058162e-05, | |
| "loss": 0.0, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 1.2634854771784232, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.445119095122397e-05, | |
| "loss": 0.0, | |
| "step": 609 | |
| }, | |
| { | |
| "epoch": 1.2655601659751037, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.442171562218012e-05, | |
| "loss": 0.0, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 1.2676348547717842, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.439216808052034e-05, | |
| "loss": 0.0, | |
| "step": 611 | |
| }, | |
| { | |
| "epoch": 1.2697095435684647, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.436254838823181e-05, | |
| "loss": 0.0, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 1.2717842323651452, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.433285660745311e-05, | |
| "loss": 0.0, | |
| "step": 613 | |
| }, | |
| { | |
| "epoch": 1.2738589211618256, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.430309280047398e-05, | |
| "loss": 0.0, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 1.2759336099585061, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.427325702973533e-05, | |
| "loss": 0.0, | |
| "step": 615 | |
| }, | |
| { | |
| "epoch": 1.2780082987551866, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.4243349357829e-05, | |
| "loss": 0.0, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 1.2800829875518671, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.421336984749771e-05, | |
| "loss": 0.0, | |
| "step": 617 | |
| }, | |
| { | |
| "epoch": 1.2821576763485476, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.418331856163483e-05, | |
| "loss": 0.0, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 1.284232365145228, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.415319556328436e-05, | |
| "loss": 0.0, | |
| "step": 619 | |
| }, | |
| { | |
| "epoch": 1.2863070539419086, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.412300091564073e-05, | |
| "loss": 0.0, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 1.288381742738589, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.409273468204866e-05, | |
| "loss": 0.0, | |
| "step": 621 | |
| }, | |
| { | |
| "epoch": 1.2904564315352698, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.406239692600308e-05, | |
| "loss": 0.0, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 1.2925311203319503, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.403198771114894e-05, | |
| "loss": 0.0, | |
| "step": 623 | |
| }, | |
| { | |
| "epoch": 1.2946058091286308, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.400150710128112e-05, | |
| "loss": 0.0, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 1.2966804979253113, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.397095516034426e-05, | |
| "loss": 0.0, | |
| "step": 625 | |
| }, | |
| { | |
| "epoch": 1.2987551867219918, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.394033195243266e-05, | |
| "loss": 0.0, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 1.3008298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.390963754179014e-05, | |
| "loss": 0.0, | |
| "step": 627 | |
| }, | |
| { | |
| "epoch": 1.3029045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.387887199280985e-05, | |
| "loss": 0.0, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 1.3049792531120332, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.384803537003423e-05, | |
| "loss": 0.0, | |
| "step": 629 | |
| }, | |
| { | |
| "epoch": 1.3070539419087137, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.381712773815479e-05, | |
| "loss": 0.0, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 1.3091286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.378614916201203e-05, | |
| "loss": 0.0, | |
| "step": 631 | |
| }, | |
| { | |
| "epoch": 1.3112033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.375509970659526e-05, | |
| "loss": 0.0, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 1.3132780082987552, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.372397943704252e-05, | |
| "loss": 0.0, | |
| "step": 633 | |
| }, | |
| { | |
| "epoch": 1.3153526970954357, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.369278841864038e-05, | |
| "loss": 0.0, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 1.3174273858921162, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.366152671682384e-05, | |
| "loss": 0.0, | |
| "step": 635 | |
| }, | |
| { | |
| "epoch": 1.3195020746887967, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.363019439717618e-05, | |
| "loss": 0.0, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 1.3215767634854771, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.359879152542885e-05, | |
| "loss": 0.0, | |
| "step": 637 | |
| }, | |
| { | |
| "epoch": 1.3236514522821576, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.356731816746129e-05, | |
| "loss": 0.0, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 1.3257261410788381, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.353577438930082e-05, | |
| "loss": 0.0, | |
| "step": 639 | |
| }, | |
| { | |
| "epoch": 1.3278008298755186, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.350416025712247e-05, | |
| "loss": 0.0, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 1.329875518672199, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.347247583724891e-05, | |
| "loss": 0.0, | |
| "step": 641 | |
| }, | |
| { | |
| "epoch": 1.3319502074688796, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.344072119615022e-05, | |
| "loss": 0.0, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 1.33402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.340889640044384e-05, | |
| "loss": 0.0, | |
| "step": 643 | |
| }, | |
| { | |
| "epoch": 1.3360995850622408, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.337700151689432e-05, | |
| "loss": 0.0, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 1.3381742738589213, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.334503661241332e-05, | |
| "loss": 0.0, | |
| "step": 645 | |
| }, | |
| { | |
| "epoch": 1.3402489626556018, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.331300175405935e-05, | |
| "loss": 0.0, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 1.3423236514522823, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.328089700903768e-05, | |
| "loss": 0.0, | |
| "step": 647 | |
| }, | |
| { | |
| "epoch": 1.3443983402489628, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.324872244470021e-05, | |
| "loss": 0.0, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 1.3464730290456433, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.321647812854531e-05, | |
| "loss": 0.0, | |
| "step": 649 | |
| }, | |
| { | |
| "epoch": 1.3485477178423237, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.318416412821766e-05, | |
| "loss": 0.0, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.3506224066390042, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.315178051150816e-05, | |
| "loss": 0.0, | |
| "step": 651 | |
| }, | |
| { | |
| "epoch": 1.3526970954356847, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.311932734635371e-05, | |
| "loss": 0.0, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 1.3547717842323652, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.308680470083716e-05, | |
| "loss": 0.0, | |
| "step": 653 | |
| }, | |
| { | |
| "epoch": 1.3568464730290457, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.305421264318713e-05, | |
| "loss": 0.0, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 1.3589211618257262, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.302155124177779e-05, | |
| "loss": 0.0, | |
| "step": 655 | |
| }, | |
| { | |
| "epoch": 1.3609958506224067, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.298882056512885e-05, | |
| "loss": 0.0, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 1.3630705394190872, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.295602068190534e-05, | |
| "loss": 0.0, | |
| "step": 657 | |
| }, | |
| { | |
| "epoch": 1.3651452282157677, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.292315166091743e-05, | |
| "loss": 0.0, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 1.3672199170124482, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.289021357112044e-05, | |
| "loss": 0.0, | |
| "step": 659 | |
| }, | |
| { | |
| "epoch": 1.3692946058091287, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.285720648161444e-05, | |
| "loss": 0.0, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.3713692946058091, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.282413046164437e-05, | |
| "loss": 0.0, | |
| "step": 661 | |
| }, | |
| { | |
| "epoch": 1.3734439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.279098558059973e-05, | |
| "loss": 0.0, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 1.3755186721991701, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.27577719080145e-05, | |
| "loss": 0.0, | |
| "step": 663 | |
| }, | |
| { | |
| "epoch": 1.3775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.272448951356696e-05, | |
| "loss": 0.0, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 1.379668049792531, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.269113846707957e-05, | |
| "loss": 0.0, | |
| "step": 665 | |
| }, | |
| { | |
| "epoch": 1.3817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.26577188385188e-05, | |
| "loss": 0.0, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 1.383817427385892, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.262423069799502e-05, | |
| "loss": 0.0, | |
| "step": 667 | |
| }, | |
| { | |
| "epoch": 1.3858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.259067411576231e-05, | |
| "loss": 0.0, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 1.387966804979253, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.255704916221835e-05, | |
| "loss": 0.0, | |
| "step": 669 | |
| }, | |
| { | |
| "epoch": 1.3900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.252335590790423e-05, | |
| "loss": 0.0, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.392116182572614, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.248959442350436e-05, | |
| "loss": 0.0, | |
| "step": 671 | |
| }, | |
| { | |
| "epoch": 1.3941908713692945, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.245576477984626e-05, | |
| "loss": 0.0, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 1.396265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.242186704790044e-05, | |
| "loss": 0.0, | |
| "step": 673 | |
| }, | |
| { | |
| "epoch": 1.3983402489626555, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.238790129878029e-05, | |
| "loss": 0.0, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 1.400414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.235386760374182e-05, | |
| "loss": 0.0, | |
| "step": 675 | |
| }, | |
| { | |
| "epoch": 1.4024896265560165, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.231976603418367e-05, | |
| "loss": 0.0, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 1.404564315352697, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.228559666164682e-05, | |
| "loss": 0.0, | |
| "step": 677 | |
| }, | |
| { | |
| "epoch": 1.4066390041493775, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.225135955781447e-05, | |
| "loss": 0.0, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 1.408713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.221705479451196e-05, | |
| "loss": 0.0, | |
| "step": 679 | |
| }, | |
| { | |
| "epoch": 1.4107883817427385, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.218268244370657e-05, | |
| "loss": 0.0, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.412863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.214824257750733e-05, | |
| "loss": 0.0, | |
| "step": 681 | |
| }, | |
| { | |
| "epoch": 1.4149377593360997, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.211373526816495e-05, | |
| "loss": 0.0, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 1.4170124481327802, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.207916058807162e-05, | |
| "loss": 0.0, | |
| "step": 683 | |
| }, | |
| { | |
| "epoch": 1.4190871369294606, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.204451860976082e-05, | |
| "loss": 0.0, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 1.4211618257261411, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.200980940590727e-05, | |
| "loss": 0.0, | |
| "step": 685 | |
| }, | |
| { | |
| "epoch": 1.4232365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.197503304932673e-05, | |
| "loss": 0.0, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 1.4253112033195021, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.194018961297576e-05, | |
| "loss": 0.0, | |
| "step": 687 | |
| }, | |
| { | |
| "epoch": 1.4273858921161826, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.190527916995173e-05, | |
| "loss": 0.0, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 1.429460580912863, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.187030179349254e-05, | |
| "loss": 0.0, | |
| "step": 689 | |
| }, | |
| { | |
| "epoch": 1.4315352697095436, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.18352575569765e-05, | |
| "loss": 0.0, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.433609958506224, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.180014653392223e-05, | |
| "loss": 0.0, | |
| "step": 691 | |
| }, | |
| { | |
| "epoch": 1.4356846473029046, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.17649687979884e-05, | |
| "loss": 0.0, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 1.437759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.172972442297369e-05, | |
| "loss": 0.0, | |
| "step": 693 | |
| }, | |
| { | |
| "epoch": 1.4398340248962656, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.169441348281654e-05, | |
| "loss": 0.0, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 1.441908713692946, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.165903605159504e-05, | |
| "loss": 0.0, | |
| "step": 695 | |
| }, | |
| { | |
| "epoch": 1.4439834024896265, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.162359220352682e-05, | |
| "loss": 0.0, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 1.446058091286307, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.158808201296877e-05, | |
| "loss": 0.0, | |
| "step": 697 | |
| }, | |
| { | |
| "epoch": 1.4481327800829875, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.155250555441702e-05, | |
| "loss": 0.0, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 1.450207468879668, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.151686290250669e-05, | |
| "loss": 0.0, | |
| "step": 699 | |
| }, | |
| { | |
| "epoch": 1.4522821576763485, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.148115413201176e-05, | |
| "loss": 0.0, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.454356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.144537931784495e-05, | |
| "loss": 0.0, | |
| "step": 701 | |
| }, | |
| { | |
| "epoch": 1.4564315352697095, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.140953853505751e-05, | |
| "loss": 0.0, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 1.45850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.137363185883909e-05, | |
| "loss": 0.0, | |
| "step": 703 | |
| }, | |
| { | |
| "epoch": 1.4605809128630705, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.133765936451756e-05, | |
| "loss": 0.0, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 1.4626556016597512, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.130162112755889e-05, | |
| "loss": 0.0, | |
| "step": 705 | |
| }, | |
| { | |
| "epoch": 1.4647302904564317, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.126551722356697e-05, | |
| "loss": 0.0, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 1.4668049792531122, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.122934772828344e-05, | |
| "loss": 0.0, | |
| "step": 707 | |
| }, | |
| { | |
| "epoch": 1.4688796680497926, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.119311271758755e-05, | |
| "loss": 0.0, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 1.4709543568464731, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.1156812267496e-05, | |
| "loss": 0.0, | |
| "step": 709 | |
| }, | |
| { | |
| "epoch": 1.4730290456431536, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.112044645416273e-05, | |
| "loss": 0.0, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.4751037344398341, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.108401535387888e-05, | |
| "loss": 0.0, | |
| "step": 711 | |
| }, | |
| { | |
| "epoch": 1.4771784232365146, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.104751904307249e-05, | |
| "loss": 0.0, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 1.479253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.101095759830843e-05, | |
| "loss": 0.0, | |
| "step": 713 | |
| }, | |
| { | |
| "epoch": 1.4813278008298756, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.09743310962882e-05, | |
| "loss": 0.0, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 1.483402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.093763961384981e-05, | |
| "loss": 0.0, | |
| "step": 715 | |
| }, | |
| { | |
| "epoch": 1.4854771784232366, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.090088322796759e-05, | |
| "loss": 0.0, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 1.487551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.086406201575198e-05, | |
| "loss": 0.0, | |
| "step": 717 | |
| }, | |
| { | |
| "epoch": 1.4896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.082717605444947e-05, | |
| "loss": 0.0, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 1.491701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.079022542144234e-05, | |
| "loss": 0.0, | |
| "step": 719 | |
| }, | |
| { | |
| "epoch": 1.4937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.07532101942486e-05, | |
| "loss": 0.0, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.495850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.071613045052173e-05, | |
| "loss": 0.0, | |
| "step": 721 | |
| }, | |
| { | |
| "epoch": 1.4979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.067898626805056e-05, | |
| "loss": 0.0, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 1.5, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.064177772475912e-05, | |
| "loss": 0.0, | |
| "step": 723 | |
| }, | |
| { | |
| "epoch": 1.5020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.060450489870647e-05, | |
| "loss": 0.0, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 1.504149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.05671678680865e-05, | |
| "loss": 0.0, | |
| "step": 725 | |
| }, | |
| { | |
| "epoch": 1.5062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.05297667112278e-05, | |
| "loss": 0.0, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 1.508298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.04923015065935e-05, | |
| "loss": 0.0, | |
| "step": 727 | |
| }, | |
| { | |
| "epoch": 1.5103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.04547723327811e-05, | |
| "loss": 0.0, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 1.512448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.041717926852228e-05, | |
| "loss": 0.0, | |
| "step": 729 | |
| }, | |
| { | |
| "epoch": 1.5145228215767634, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.037952239268275e-05, | |
| "loss": 0.0, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 1.516597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.034180178426212e-05, | |
| "loss": 0.0, | |
| "step": 731 | |
| }, | |
| { | |
| "epoch": 1.5186721991701244, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.030401752239369e-05, | |
| "loss": 0.0, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 1.520746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.026616968634428e-05, | |
| "loss": 0.0, | |
| "step": 733 | |
| }, | |
| { | |
| "epoch": 1.5228215767634854, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.02282583555141e-05, | |
| "loss": 0.0, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 1.5248962655601659, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.019028360943656e-05, | |
| "loss": 0.0, | |
| "step": 735 | |
| }, | |
| { | |
| "epoch": 1.5269709543568464, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.01522455277781e-05, | |
| "loss": 0.0, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 1.5290456431535269, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.011414419033804e-05, | |
| "loss": 0.0, | |
| "step": 737 | |
| }, | |
| { | |
| "epoch": 1.5311203319502074, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.00759796770484e-05, | |
| "loss": 0.0, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 1.5331950207468878, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.003775206797374e-05, | |
| "loss": 0.0, | |
| "step": 739 | |
| }, | |
| { | |
| "epoch": 1.5352697095435683, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.999946144331096e-05, | |
| "loss": 0.0, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 1.5373443983402488, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.996110788338921e-05, | |
| "loss": 0.0, | |
| "step": 741 | |
| }, | |
| { | |
| "epoch": 1.5394190871369293, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.992269146866963e-05, | |
| "loss": 0.0, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 1.5414937759336098, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.988421227974524e-05, | |
| "loss": 0.0, | |
| "step": 743 | |
| }, | |
| { | |
| "epoch": 1.5435684647302903, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.984567039734074e-05, | |
| "loss": 0.0, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 1.5456431535269708, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.980706590231237e-05, | |
| "loss": 0.0, | |
| "step": 745 | |
| }, | |
| { | |
| "epoch": 1.5477178423236515, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.97683988756477e-05, | |
| "loss": 0.0, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 1.549792531120332, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.972966939846547e-05, | |
| "loss": 0.0, | |
| "step": 747 | |
| }, | |
| { | |
| "epoch": 1.5518672199170125, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.969087755201553e-05, | |
| "loss": 0.0, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 1.553941908713693, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.965202341767844e-05, | |
| "loss": 0.0, | |
| "step": 749 | |
| }, | |
| { | |
| "epoch": 1.5560165975103735, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.961310707696555e-05, | |
| "loss": 0.0, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.558091286307054, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.95741286115186e-05, | |
| "loss": 0.0, | |
| "step": 751 | |
| }, | |
| { | |
| "epoch": 1.5601659751037344, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.953508810310979e-05, | |
| "loss": 0.0, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 1.562240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.949598563364134e-05, | |
| "loss": 0.0, | |
| "step": 753 | |
| }, | |
| { | |
| "epoch": 1.5643153526970954, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.945682128514557e-05, | |
| "loss": 0.0, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 1.566390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.941759513978454e-05, | |
| "loss": 0.0, | |
| "step": 755 | |
| }, | |
| { | |
| "epoch": 1.5684647302904564, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.937830727985001e-05, | |
| "loss": 0.0, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 1.570539419087137, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.933895778776314e-05, | |
| "loss": 0.0, | |
| "step": 757 | |
| }, | |
| { | |
| "epoch": 1.5726141078838174, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.929954674607447e-05, | |
| "loss": 0.0, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 1.5746887966804979, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.926007423746357e-05, | |
| "loss": 0.0, | |
| "step": 759 | |
| }, | |
| { | |
| "epoch": 1.5767634854771784, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.922054034473908e-05, | |
| "loss": 0.0, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 1.578838174273859, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.91809451508383e-05, | |
| "loss": 0.0, | |
| "step": 761 | |
| }, | |
| { | |
| "epoch": 1.5809128630705396, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.91412887388272e-05, | |
| "loss": 0.0, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 1.58298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.910157119190014e-05, | |
| "loss": 0.0, | |
| "step": 763 | |
| }, | |
| { | |
| "epoch": 1.5850622406639006, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.906179259337976e-05, | |
| "loss": 0.0, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 1.587136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.902195302671679e-05, | |
| "loss": 0.0, | |
| "step": 765 | |
| }, | |
| { | |
| "epoch": 1.5892116182572615, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.898205257548983e-05, | |
| "loss": 0.0, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 1.591286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.894209132340521e-05, | |
| "loss": 0.0, | |
| "step": 767 | |
| }, | |
| { | |
| "epoch": 1.5933609958506225, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.890206935429685e-05, | |
| "loss": 0.0, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 1.595435684647303, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.886198675212602e-05, | |
| "loss": 0.0, | |
| "step": 769 | |
| }, | |
| { | |
| "epoch": 1.5975103734439835, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.882184360098118e-05, | |
| "loss": 0.0, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 1.599585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.878163998507785e-05, | |
| "loss": 0.0, | |
| "step": 771 | |
| }, | |
| { | |
| "epoch": 1.6016597510373445, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.874137598875835e-05, | |
| "loss": 0.0, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 1.603734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.870105169649172e-05, | |
| "loss": 0.0, | |
| "step": 773 | |
| }, | |
| { | |
| "epoch": 1.6058091286307055, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.866066719287347e-05, | |
| "loss": 0.0, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 1.607883817427386, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.862022256262541e-05, | |
| "loss": 0.0, | |
| "step": 775 | |
| }, | |
| { | |
| "epoch": 1.6099585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.857971789059553e-05, | |
| "loss": 0.0, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 1.612033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.853915326175773e-05, | |
| "loss": 0.0, | |
| "step": 777 | |
| }, | |
| { | |
| "epoch": 1.6141078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.849852876121174e-05, | |
| "loss": 0.0, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 1.616182572614108, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.845784447418287e-05, | |
| "loss": 0.0, | |
| "step": 779 | |
| }, | |
| { | |
| "epoch": 1.6182572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.841710048602183e-05, | |
| "loss": 0.0, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 1.620331950207469, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.837629688220464e-05, | |
| "loss": 0.0, | |
| "step": 781 | |
| }, | |
| { | |
| "epoch": 1.6224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.833543374833232e-05, | |
| "loss": 0.0, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 1.6244813278008299, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.829451117013083e-05, | |
| "loss": 0.0, | |
| "step": 783 | |
| }, | |
| { | |
| "epoch": 1.6265560165975104, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.82535292334508e-05, | |
| "loss": 0.0, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 1.6286307053941909, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.821248802426739e-05, | |
| "loss": 0.0, | |
| "step": 785 | |
| }, | |
| { | |
| "epoch": 1.6307053941908713, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.817138762868014e-05, | |
| "loss": 0.0, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 1.6327800829875518, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.813022813291273e-05, | |
| "loss": 0.0, | |
| "step": 787 | |
| }, | |
| { | |
| "epoch": 1.6348547717842323, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.80890096233128e-05, | |
| "loss": 0.0, | |
| "step": 788 | |
| }, | |
| { | |
| "epoch": 1.6369294605809128, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.804773218635185e-05, | |
| "loss": 0.0, | |
| "step": 789 | |
| }, | |
| { | |
| "epoch": 1.6390041493775933, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.800639590862496e-05, | |
| "loss": 0.0, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 1.6410788381742738, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.796500087685067e-05, | |
| "loss": 0.0, | |
| "step": 791 | |
| }, | |
| { | |
| "epoch": 1.6431535269709543, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.792354717787078e-05, | |
| "loss": 0.0, | |
| "step": 792 | |
| }, | |
| { | |
| "epoch": 1.6452282157676348, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.788203489865014e-05, | |
| "loss": 0.0, | |
| "step": 793 | |
| }, | |
| { | |
| "epoch": 1.6473029045643153, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.784046412627653e-05, | |
| "loss": 0.0, | |
| "step": 794 | |
| }, | |
| { | |
| "epoch": 1.6493775933609958, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.779883494796044e-05, | |
| "loss": 0.0, | |
| "step": 795 | |
| }, | |
| { | |
| "epoch": 1.6514522821576763, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.775714745103484e-05, | |
| "loss": 0.0, | |
| "step": 796 | |
| }, | |
| { | |
| "epoch": 1.6535269709543567, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.771540172295511e-05, | |
| "loss": 0.0, | |
| "step": 797 | |
| }, | |
| { | |
| "epoch": 1.6556016597510372, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.767359785129875e-05, | |
| "loss": 0.0, | |
| "step": 798 | |
| }, | |
| { | |
| "epoch": 1.6576763485477177, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.763173592376525e-05, | |
| "loss": 0.0, | |
| "step": 799 | |
| }, | |
| { | |
| "epoch": 1.6597510373443982, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.758981602817592e-05, | |
| "loss": 0.0, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.6618257261410787, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.75478382524736e-05, | |
| "loss": 0.0, | |
| "step": 801 | |
| }, | |
| { | |
| "epoch": 1.6639004149377592, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.750580268472267e-05, | |
| "loss": 0.0, | |
| "step": 802 | |
| }, | |
| { | |
| "epoch": 1.6659751037344397, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.746370941310865e-05, | |
| "loss": 0.0, | |
| "step": 803 | |
| }, | |
| { | |
| "epoch": 1.6680497925311202, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.742155852593818e-05, | |
| "loss": 0.0, | |
| "step": 804 | |
| }, | |
| { | |
| "epoch": 1.6701244813278007, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.737935011163873e-05, | |
| "loss": 0.0, | |
| "step": 805 | |
| }, | |
| { | |
| "epoch": 1.6721991701244814, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.73370842587585e-05, | |
| "loss": 0.0, | |
| "step": 806 | |
| }, | |
| { | |
| "epoch": 1.6742738589211619, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.729476105596611e-05, | |
| "loss": 0.0, | |
| "step": 807 | |
| }, | |
| { | |
| "epoch": 1.6763485477178424, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.72523805920506e-05, | |
| "loss": 0.0, | |
| "step": 808 | |
| }, | |
| { | |
| "epoch": 1.6784232365145229, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.720994295592107e-05, | |
| "loss": 0.0, | |
| "step": 809 | |
| }, | |
| { | |
| "epoch": 1.6804979253112033, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.716744823660654e-05, | |
| "loss": 0.0, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.6825726141078838, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.712489652325586e-05, | |
| "loss": 0.0, | |
| "step": 811 | |
| }, | |
| { | |
| "epoch": 1.6846473029045643, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.708228790513737e-05, | |
| "loss": 0.0, | |
| "step": 812 | |
| }, | |
| { | |
| "epoch": 1.6867219917012448, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.703962247163883e-05, | |
| "loss": 0.0, | |
| "step": 813 | |
| }, | |
| { | |
| "epoch": 1.6887966804979253, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.699690031226718e-05, | |
| "loss": 0.0, | |
| "step": 814 | |
| }, | |
| { | |
| "epoch": 1.6908713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.695412151664838e-05, | |
| "loss": 0.0, | |
| "step": 815 | |
| }, | |
| { | |
| "epoch": 1.6929460580912863, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.69112861745272e-05, | |
| "loss": 0.0, | |
| "step": 816 | |
| }, | |
| { | |
| "epoch": 1.6950207468879668, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.686839437576701e-05, | |
| "loss": 0.0, | |
| "step": 817 | |
| }, | |
| { | |
| "epoch": 1.6970954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.682544621034965e-05, | |
| "loss": 0.0, | |
| "step": 818 | |
| }, | |
| { | |
| "epoch": 1.6991701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.67824417683752e-05, | |
| "loss": 0.0, | |
| "step": 819 | |
| }, | |
| { | |
| "epoch": 1.7012448132780082, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.673938114006181e-05, | |
| "loss": 0.0, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.703319502074689, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.669626441574547e-05, | |
| "loss": 0.0, | |
| "step": 821 | |
| }, | |
| { | |
| "epoch": 1.7053941908713695, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.665309168587992e-05, | |
| "loss": 0.0, | |
| "step": 822 | |
| }, | |
| { | |
| "epoch": 1.70746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.660986304103633e-05, | |
| "loss": 0.0, | |
| "step": 823 | |
| }, | |
| { | |
| "epoch": 1.7095435684647304, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.656657857190317e-05, | |
| "loss": 0.0, | |
| "step": 824 | |
| }, | |
| { | |
| "epoch": 1.711618257261411, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.652323836928607e-05, | |
| "loss": 0.0, | |
| "step": 825 | |
| }, | |
| { | |
| "epoch": 1.7136929460580914, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.647984252410755e-05, | |
| "loss": 0.0, | |
| "step": 826 | |
| }, | |
| { | |
| "epoch": 1.715767634854772, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.643639112740688e-05, | |
| "loss": 0.0, | |
| "step": 827 | |
| }, | |
| { | |
| "epoch": 1.7178423236514524, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.639288427033986e-05, | |
| "loss": 0.0, | |
| "step": 828 | |
| }, | |
| { | |
| "epoch": 1.7199170124481329, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.634932204417864e-05, | |
| "loss": 0.0, | |
| "step": 829 | |
| }, | |
| { | |
| "epoch": 1.7219917012448134, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.630570454031149e-05, | |
| "loss": 0.0, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.7240663900414939, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.626203185024274e-05, | |
| "loss": 0.0, | |
| "step": 831 | |
| }, | |
| { | |
| "epoch": 1.7261410788381744, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.62183040655924e-05, | |
| "loss": 0.0, | |
| "step": 832 | |
| }, | |
| { | |
| "epoch": 1.7282157676348548, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.617452127809611e-05, | |
| "loss": 0.0, | |
| "step": 833 | |
| }, | |
| { | |
| "epoch": 1.7302904564315353, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.613068357960489e-05, | |
| "loss": 0.0, | |
| "step": 834 | |
| }, | |
| { | |
| "epoch": 1.7323651452282158, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.608679106208493e-05, | |
| "loss": 0.0, | |
| "step": 835 | |
| }, | |
| { | |
| "epoch": 1.7344398340248963, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.604284381761748e-05, | |
| "loss": 0.0, | |
| "step": 836 | |
| }, | |
| { | |
| "epoch": 1.7365145228215768, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.599884193839856e-05, | |
| "loss": 0.0, | |
| "step": 837 | |
| }, | |
| { | |
| "epoch": 1.7385892116182573, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.59547855167388e-05, | |
| "loss": 0.0, | |
| "step": 838 | |
| }, | |
| { | |
| "epoch": 1.7406639004149378, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.59106746450633e-05, | |
| "loss": 0.0, | |
| "step": 839 | |
| }, | |
| { | |
| "epoch": 1.7427385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.586650941591133e-05, | |
| "loss": 0.0, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.7448132780082988, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.582228992193624e-05, | |
| "loss": 0.0, | |
| "step": 841 | |
| }, | |
| { | |
| "epoch": 1.7468879668049793, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.577801625590521e-05, | |
| "loss": 0.0, | |
| "step": 842 | |
| }, | |
| { | |
| "epoch": 1.7489626556016598, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.573368851069906e-05, | |
| "loss": 0.0, | |
| "step": 843 | |
| }, | |
| { | |
| "epoch": 1.7510373443983402, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.568930677931204e-05, | |
| "loss": 0.0, | |
| "step": 844 | |
| }, | |
| { | |
| "epoch": 1.7531120331950207, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.564487115485169e-05, | |
| "loss": 0.0, | |
| "step": 845 | |
| }, | |
| { | |
| "epoch": 1.7551867219917012, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.560038173053863e-05, | |
| "loss": 0.0, | |
| "step": 846 | |
| }, | |
| { | |
| "epoch": 1.7572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.555583859970627e-05, | |
| "loss": 0.0, | |
| "step": 847 | |
| }, | |
| { | |
| "epoch": 1.7593360995850622, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.551124185580078e-05, | |
| "loss": 0.0, | |
| "step": 848 | |
| }, | |
| { | |
| "epoch": 1.7614107883817427, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.546659159238074e-05, | |
| "loss": 0.0, | |
| "step": 849 | |
| }, | |
| { | |
| "epoch": 1.7634854771784232, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.542188790311702e-05, | |
| "loss": 0.0, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.7655601659751037, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.53771308817926e-05, | |
| "loss": 0.0, | |
| "step": 851 | |
| }, | |
| { | |
| "epoch": 1.7676348547717842, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.533232062230233e-05, | |
| "loss": 0.0, | |
| "step": 852 | |
| }, | |
| { | |
| "epoch": 1.7697095435684647, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.52874572186527e-05, | |
| "loss": 0.0, | |
| "step": 853 | |
| }, | |
| { | |
| "epoch": 1.7717842323651452, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.524254076496175e-05, | |
| "loss": 0.0, | |
| "step": 854 | |
| }, | |
| { | |
| "epoch": 1.7738589211618256, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.519757135545881e-05, | |
| "loss": 0.0, | |
| "step": 855 | |
| }, | |
| { | |
| "epoch": 1.7759336099585061, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.515254908448426e-05, | |
| "loss": 0.0, | |
| "step": 856 | |
| }, | |
| { | |
| "epoch": 1.7780082987551866, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.510747404648944e-05, | |
| "loss": 0.0, | |
| "step": 857 | |
| }, | |
| { | |
| "epoch": 1.7800829875518671, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.506234633603631e-05, | |
| "loss": 0.0, | |
| "step": 858 | |
| }, | |
| { | |
| "epoch": 1.7821576763485476, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.501716604779741e-05, | |
| "loss": 0.0, | |
| "step": 859 | |
| }, | |
| { | |
| "epoch": 1.784232365145228, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.497193327655554e-05, | |
| "loss": 0.0, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.7863070539419086, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.49266481172036e-05, | |
| "loss": 0.0, | |
| "step": 861 | |
| }, | |
| { | |
| "epoch": 1.788381742738589, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.48813106647444e-05, | |
| "loss": 0.0, | |
| "step": 862 | |
| }, | |
| { | |
| "epoch": 1.7904564315352696, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.483592101429047e-05, | |
| "loss": 0.0, | |
| "step": 863 | |
| }, | |
| { | |
| "epoch": 1.79253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.479047926106383e-05, | |
| "loss": 0.0, | |
| "step": 864 | |
| }, | |
| { | |
| "epoch": 1.7946058091286305, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.47449855003958e-05, | |
| "loss": 0.0, | |
| "step": 865 | |
| }, | |
| { | |
| "epoch": 1.796680497925311, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.469943982772682e-05, | |
| "loss": 0.0, | |
| "step": 866 | |
| }, | |
| { | |
| "epoch": 1.7987551867219918, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.465384233860623e-05, | |
| "loss": 0.0, | |
| "step": 867 | |
| }, | |
| { | |
| "epoch": 1.8008298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.460819312869208e-05, | |
| "loss": 0.0, | |
| "step": 868 | |
| }, | |
| { | |
| "epoch": 1.8029045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.45624922937509e-05, | |
| "loss": 0.0, | |
| "step": 869 | |
| }, | |
| { | |
| "epoch": 1.8049792531120332, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.451673992965755e-05, | |
| "loss": 0.0, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.8070539419087137, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.447093613239497e-05, | |
| "loss": 0.0, | |
| "step": 871 | |
| }, | |
| { | |
| "epoch": 1.8091286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.442508099805406e-05, | |
| "loss": 0.0, | |
| "step": 872 | |
| }, | |
| { | |
| "epoch": 1.8112033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.437917462283331e-05, | |
| "loss": 0.0, | |
| "step": 873 | |
| }, | |
| { | |
| "epoch": 1.8132780082987552, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.43332171030388e-05, | |
| "loss": 0.0, | |
| "step": 874 | |
| }, | |
| { | |
| "epoch": 1.8153526970954357, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.428720853508388e-05, | |
| "loss": 0.0, | |
| "step": 875 | |
| }, | |
| { | |
| "epoch": 1.8174273858921162, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.424114901548896e-05, | |
| "loss": 0.0, | |
| "step": 876 | |
| }, | |
| { | |
| "epoch": 1.8195020746887967, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.41950386408814e-05, | |
| "loss": 0.0, | |
| "step": 877 | |
| }, | |
| { | |
| "epoch": 1.8215767634854771, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.41488775079952e-05, | |
| "loss": 0.0, | |
| "step": 878 | |
| }, | |
| { | |
| "epoch": 1.8236514522821576, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.410266571367088e-05, | |
| "loss": 0.0, | |
| "step": 879 | |
| }, | |
| { | |
| "epoch": 1.8257261410788381, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.405640335485518e-05, | |
| "loss": 0.0, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 1.8278008298755186, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.401009052860099e-05, | |
| "loss": 0.0, | |
| "step": 881 | |
| }, | |
| { | |
| "epoch": 1.8298755186721993, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.396372733206705e-05, | |
| "loss": 0.0, | |
| "step": 882 | |
| }, | |
| { | |
| "epoch": 1.8319502074688798, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.391731386251774e-05, | |
| "loss": 0.0, | |
| "step": 883 | |
| }, | |
| { | |
| "epoch": 1.8340248962655603, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.387085021732296e-05, | |
| "loss": 0.0, | |
| "step": 884 | |
| }, | |
| { | |
| "epoch": 1.8360995850622408, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.382433649395782e-05, | |
| "loss": 0.0, | |
| "step": 885 | |
| }, | |
| { | |
| "epoch": 1.8381742738589213, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.377777279000253e-05, | |
| "loss": 0.0, | |
| "step": 886 | |
| }, | |
| { | |
| "epoch": 1.8402489626556018, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.373115920314213e-05, | |
| "loss": 0.0, | |
| "step": 887 | |
| }, | |
| { | |
| "epoch": 1.8423236514522823, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.368449583116631e-05, | |
| "loss": 0.0, | |
| "step": 888 | |
| }, | |
| { | |
| "epoch": 1.8443983402489628, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.36377827719692e-05, | |
| "loss": 0.0, | |
| "step": 889 | |
| }, | |
| { | |
| "epoch": 1.8464730290456433, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.35910201235492e-05, | |
| "loss": 0.0, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 1.8485477178423237, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.35442079840087e-05, | |
| "loss": 0.0, | |
| "step": 891 | |
| }, | |
| { | |
| "epoch": 1.8506224066390042, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.349734645155395e-05, | |
| "loss": 0.0, | |
| "step": 892 | |
| }, | |
| { | |
| "epoch": 1.8526970954356847, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.345043562449477e-05, | |
| "loss": 0.0, | |
| "step": 893 | |
| }, | |
| { | |
| "epoch": 1.8547717842323652, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.340347560124447e-05, | |
| "loss": 0.0, | |
| "step": 894 | |
| }, | |
| { | |
| "epoch": 1.8568464730290457, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.33564664803195e-05, | |
| "loss": 0.0, | |
| "step": 895 | |
| }, | |
| { | |
| "epoch": 1.8589211618257262, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.330940836033932e-05, | |
| "loss": 0.0, | |
| "step": 896 | |
| }, | |
| { | |
| "epoch": 1.8609958506224067, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.326230134002625e-05, | |
| "loss": 0.0, | |
| "step": 897 | |
| }, | |
| { | |
| "epoch": 1.8630705394190872, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.321514551820512e-05, | |
| "loss": 0.0, | |
| "step": 898 | |
| }, | |
| { | |
| "epoch": 1.8651452282157677, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.316794099380318e-05, | |
| "loss": 0.0, | |
| "step": 899 | |
| }, | |
| { | |
| "epoch": 1.8672199170124482, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.31206878658498e-05, | |
| "loss": 0.0, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.8692946058091287, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.307338623347642e-05, | |
| "loss": 0.0, | |
| "step": 901 | |
| }, | |
| { | |
| "epoch": 1.8713692946058091, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.302603619591611e-05, | |
| "loss": 0.0, | |
| "step": 902 | |
| }, | |
| { | |
| "epoch": 1.8734439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.29786378525036e-05, | |
| "loss": 0.0, | |
| "step": 903 | |
| }, | |
| { | |
| "epoch": 1.8755186721991701, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.293119130267486e-05, | |
| "loss": 0.0, | |
| "step": 904 | |
| }, | |
| { | |
| "epoch": 1.8775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.288369664596707e-05, | |
| "loss": 0.0, | |
| "step": 905 | |
| }, | |
| { | |
| "epoch": 1.879668049792531, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.28361539820183e-05, | |
| "loss": 0.0, | |
| "step": 906 | |
| }, | |
| { | |
| "epoch": 1.8817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.278856341056734e-05, | |
| "loss": 0.0, | |
| "step": 907 | |
| }, | |
| { | |
| "epoch": 1.883817427385892, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.274092503145345e-05, | |
| "loss": 0.0, | |
| "step": 908 | |
| }, | |
| { | |
| "epoch": 1.8858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.269323894461627e-05, | |
| "loss": 0.0, | |
| "step": 909 | |
| }, | |
| { | |
| "epoch": 1.887966804979253, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.264550525009542e-05, | |
| "loss": 0.0, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 1.8900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.259772404803047e-05, | |
| "loss": 0.0, | |
| "step": 911 | |
| }, | |
| { | |
| "epoch": 1.892116182572614, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.254989543866063e-05, | |
| "loss": 0.0, | |
| "step": 912 | |
| }, | |
| { | |
| "epoch": 1.8941908713692945, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.250201952232454e-05, | |
| "loss": 0.0, | |
| "step": 913 | |
| }, | |
| { | |
| "epoch": 1.896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.245409639946016e-05, | |
| "loss": 0.0, | |
| "step": 914 | |
| }, | |
| { | |
| "epoch": 1.8983402489626555, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.240612617060437e-05, | |
| "loss": 0.0, | |
| "step": 915 | |
| }, | |
| { | |
| "epoch": 1.900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.235810893639299e-05, | |
| "loss": 0.0, | |
| "step": 916 | |
| }, | |
| { | |
| "epoch": 1.9024896265560165, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.231004479756036e-05, | |
| "loss": 0.0, | |
| "step": 917 | |
| }, | |
| { | |
| "epoch": 1.904564315352697, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.226193385493928e-05, | |
| "loss": 0.0, | |
| "step": 918 | |
| }, | |
| { | |
| "epoch": 1.9066390041493775, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.22137762094607e-05, | |
| "loss": 0.0, | |
| "step": 919 | |
| }, | |
| { | |
| "epoch": 1.908713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.21655719621536e-05, | |
| "loss": 0.0, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 1.9107883817427385, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.211732121414463e-05, | |
| "loss": 0.0, | |
| "step": 921 | |
| }, | |
| { | |
| "epoch": 1.912863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.20690240666581e-05, | |
| "loss": 0.0, | |
| "step": 922 | |
| }, | |
| { | |
| "epoch": 1.9149377593360994, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.202068062101558e-05, | |
| "loss": 0.0, | |
| "step": 923 | |
| }, | |
| { | |
| "epoch": 1.91701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.197229097863582e-05, | |
| "loss": 0.0, | |
| "step": 924 | |
| }, | |
| { | |
| "epoch": 1.9190871369294604, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.192385524103446e-05, | |
| "loss": 0.0, | |
| "step": 925 | |
| }, | |
| { | |
| "epoch": 1.921161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.187537350982385e-05, | |
| "loss": 0.0, | |
| "step": 926 | |
| }, | |
| { | |
| "epoch": 1.9232365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.182684588671281e-05, | |
| "loss": 0.0, | |
| "step": 927 | |
| }, | |
| { | |
| "epoch": 1.9253112033195021, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.177827247350646e-05, | |
| "loss": 0.0, | |
| "step": 928 | |
| }, | |
| { | |
| "epoch": 1.9273858921161826, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.172965337210599e-05, | |
| "loss": 0.0, | |
| "step": 929 | |
| }, | |
| { | |
| "epoch": 1.929460580912863, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.168098868450841e-05, | |
| "loss": 0.0, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 1.9315352697095436, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.163227851280639e-05, | |
| "loss": 0.0, | |
| "step": 931 | |
| }, | |
| { | |
| "epoch": 1.933609958506224, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.158352295918797e-05, | |
| "loss": 0.0, | |
| "step": 932 | |
| }, | |
| { | |
| "epoch": 1.9356846473029046, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.153472212593646e-05, | |
| "loss": 0.0, | |
| "step": 933 | |
| }, | |
| { | |
| "epoch": 1.937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.148587611543013e-05, | |
| "loss": 0.0, | |
| "step": 934 | |
| }, | |
| { | |
| "epoch": 1.9398340248962656, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.143698503014203e-05, | |
| "loss": 0.0, | |
| "step": 935 | |
| }, | |
| { | |
| "epoch": 1.941908713692946, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.138804897263978e-05, | |
| "loss": 0.0, | |
| "step": 936 | |
| }, | |
| { | |
| "epoch": 1.9439834024896265, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.133906804558532e-05, | |
| "loss": 0.0, | |
| "step": 937 | |
| }, | |
| { | |
| "epoch": 1.946058091286307, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.129004235173475e-05, | |
| "loss": 0.0, | |
| "step": 938 | |
| }, | |
| { | |
| "epoch": 1.9481327800829875, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.12409719939381e-05, | |
| "loss": 0.0, | |
| "step": 939 | |
| }, | |
| { | |
| "epoch": 1.950207468879668, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.119185707513903e-05, | |
| "loss": 0.0, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 1.9522821576763485, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.114269769837473e-05, | |
| "loss": 0.0, | |
| "step": 941 | |
| }, | |
| { | |
| "epoch": 1.9543568464730292, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.109349396677568e-05, | |
| "loss": 0.0, | |
| "step": 942 | |
| }, | |
| { | |
| "epoch": 1.9564315352697097, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.104424598356538e-05, | |
| "loss": 0.0, | |
| "step": 943 | |
| }, | |
| { | |
| "epoch": 1.9585062240663902, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.0994953852060155e-05, | |
| "loss": 0.0, | |
| "step": 944 | |
| }, | |
| { | |
| "epoch": 1.9605809128630707, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.094561767566898e-05, | |
| "loss": 0.0, | |
| "step": 945 | |
| }, | |
| { | |
| "epoch": 1.9626556016597512, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.089623755789322e-05, | |
| "loss": 0.0, | |
| "step": 946 | |
| }, | |
| { | |
| "epoch": 1.9647302904564317, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.0846813602326386e-05, | |
| "loss": 0.0, | |
| "step": 947 | |
| }, | |
| { | |
| "epoch": 1.9668049792531122, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.079734591265404e-05, | |
| "loss": 0.0, | |
| "step": 948 | |
| }, | |
| { | |
| "epoch": 1.9688796680497926, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.0747834592653376e-05, | |
| "loss": 0.0, | |
| "step": 949 | |
| }, | |
| { | |
| "epoch": 1.9709543568464731, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.069827974619324e-05, | |
| "loss": 0.0, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.9730290456431536, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.0648681477233706e-05, | |
| "loss": 0.0, | |
| "step": 951 | |
| }, | |
| { | |
| "epoch": 1.9751037344398341, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.059903988982596e-05, | |
| "loss": 0.0, | |
| "step": 952 | |
| }, | |
| { | |
| "epoch": 1.9771784232365146, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.054935508811212e-05, | |
| "loss": 0.0, | |
| "step": 953 | |
| }, | |
| { | |
| "epoch": 1.979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.049962717632488e-05, | |
| "loss": 0.0, | |
| "step": 954 | |
| }, | |
| { | |
| "epoch": 1.9813278008298756, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.044985625878743e-05, | |
| "loss": 0.0, | |
| "step": 955 | |
| }, | |
| { | |
| "epoch": 1.983402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.040004243991316e-05, | |
| "loss": 0.0, | |
| "step": 956 | |
| }, | |
| { | |
| "epoch": 1.9854771784232366, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.0350185824205466e-05, | |
| "loss": 0.0, | |
| "step": 957 | |
| }, | |
| { | |
| "epoch": 1.987551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.0300286516257526e-05, | |
| "loss": 0.0, | |
| "step": 958 | |
| }, | |
| { | |
| "epoch": 1.9896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.025034462075209e-05, | |
| "loss": 0.0, | |
| "step": 959 | |
| }, | |
| { | |
| "epoch": 1.991701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.020036024246122e-05, | |
| "loss": 0.0, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 1.9937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.015033348624615e-05, | |
| "loss": 0.0, | |
| "step": 961 | |
| }, | |
| { | |
| "epoch": 1.995850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.0100264457056985e-05, | |
| "loss": 0.0, | |
| "step": 962 | |
| }, | |
| { | |
| "epoch": 1.9979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.005015325993252e-05, | |
| "loss": 0.0, | |
| "step": 963 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.000000000000001e-05, | |
| "loss": 0.0, | |
| "step": 964 | |
| }, | |
| { | |
| "epoch": 2.0020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.994980478247496e-05, | |
| "loss": 0.0, | |
| "step": 965 | |
| }, | |
| { | |
| "epoch": 2.004149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.989956771266089e-05, | |
| "loss": 0.0, | |
| "step": 966 | |
| }, | |
| { | |
| "epoch": 2.0062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.984928889594914e-05, | |
| "loss": 0.0, | |
| "step": 967 | |
| }, | |
| { | |
| "epoch": 2.008298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.97989684378186e-05, | |
| "loss": 0.0, | |
| "step": 968 | |
| }, | |
| { | |
| "epoch": 2.0103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.974860644383555e-05, | |
| "loss": 0.0, | |
| "step": 969 | |
| }, | |
| { | |
| "epoch": 2.012448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.9698203019653365e-05, | |
| "loss": 0.0, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 2.0145228215767634, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.964775827101239e-05, | |
| "loss": 0.0, | |
| "step": 971 | |
| }, | |
| { | |
| "epoch": 2.016597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.9597272303739614e-05, | |
| "loss": 0.0, | |
| "step": 972 | |
| }, | |
| { | |
| "epoch": 2.0186721991701244, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.954674522374853e-05, | |
| "loss": 0.0, | |
| "step": 973 | |
| }, | |
| { | |
| "epoch": 2.020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.9496177137038864e-05, | |
| "loss": 0.0, | |
| "step": 974 | |
| }, | |
| { | |
| "epoch": 2.0228215767634854, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.9445568149696395e-05, | |
| "loss": 0.0, | |
| "step": 975 | |
| }, | |
| { | |
| "epoch": 2.024896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.939491836789266e-05, | |
| "loss": 0.0, | |
| "step": 976 | |
| }, | |
| { | |
| "epoch": 2.0269709543568464, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.934422789788481e-05, | |
| "loss": 0.0, | |
| "step": 977 | |
| }, | |
| { | |
| "epoch": 2.029045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.929349684601536e-05, | |
| "loss": 0.0, | |
| "step": 978 | |
| }, | |
| { | |
| "epoch": 2.0311203319502074, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.924272531871195e-05, | |
| "loss": 0.0, | |
| "step": 979 | |
| }, | |
| { | |
| "epoch": 2.033195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.919191342248711e-05, | |
| "loss": 0.0, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 2.0352697095435683, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.914106126393812e-05, | |
| "loss": 0.0, | |
| "step": 981 | |
| }, | |
| { | |
| "epoch": 2.037344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.909016894974664e-05, | |
| "loss": 0.0, | |
| "step": 982 | |
| }, | |
| { | |
| "epoch": 2.0394190871369293, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.903923658667866e-05, | |
| "loss": 0.0, | |
| "step": 983 | |
| }, | |
| { | |
| "epoch": 2.04149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.898826428158414e-05, | |
| "loss": 0.0, | |
| "step": 984 | |
| }, | |
| { | |
| "epoch": 2.0435684647302903, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.893725214139682e-05, | |
| "loss": 0.0, | |
| "step": 985 | |
| }, | |
| { | |
| "epoch": 2.045643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.8886200273134046e-05, | |
| "loss": 0.0, | |
| "step": 986 | |
| }, | |
| { | |
| "epoch": 2.0477178423236513, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.88351087838965e-05, | |
| "loss": 0.0, | |
| "step": 987 | |
| }, | |
| { | |
| "epoch": 2.0497925311203318, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.878397778086794e-05, | |
| "loss": 0.0, | |
| "step": 988 | |
| }, | |
| { | |
| "epoch": 2.0518672199170123, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.87328073713151e-05, | |
| "loss": 0.0, | |
| "step": 989 | |
| }, | |
| { | |
| "epoch": 2.0539419087136928, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.868159766258732e-05, | |
| "loss": 0.0, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 2.0560165975103732, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.8630348762116395e-05, | |
| "loss": 0.0, | |
| "step": 991 | |
| }, | |
| { | |
| "epoch": 2.0580912863070537, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.8579060777416367e-05, | |
| "loss": 0.0, | |
| "step": 992 | |
| }, | |
| { | |
| "epoch": 2.0601659751037342, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.852773381608323e-05, | |
| "loss": 0.0, | |
| "step": 993 | |
| }, | |
| { | |
| "epoch": 2.0622406639004147, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.847636798579481e-05, | |
| "loss": 0.0, | |
| "step": 994 | |
| }, | |
| { | |
| "epoch": 2.064315352697095, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.84249633943104e-05, | |
| "loss": 0.0, | |
| "step": 995 | |
| }, | |
| { | |
| "epoch": 2.066390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.837352014947065e-05, | |
| "loss": 0.0, | |
| "step": 996 | |
| }, | |
| { | |
| "epoch": 2.0684647302904566, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.832203835919732e-05, | |
| "loss": 0.0, | |
| "step": 997 | |
| }, | |
| { | |
| "epoch": 2.070539419087137, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.827051813149299e-05, | |
| "loss": 0.0, | |
| "step": 998 | |
| }, | |
| { | |
| "epoch": 2.0726141078838176, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.8218959574440886e-05, | |
| "loss": 0.0, | |
| "step": 999 | |
| }, | |
| { | |
| "epoch": 2.074688796680498, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.816736279620467e-05, | |
| "loss": 0.0, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 2.0767634854771786, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.811572790502817e-05, | |
| "loss": 0.0, | |
| "step": 1001 | |
| }, | |
| { | |
| "epoch": 2.078838174273859, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.806405500923517e-05, | |
| "loss": 0.0, | |
| "step": 1002 | |
| }, | |
| { | |
| "epoch": 2.0809128630705396, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.8012344217229196e-05, | |
| "loss": 0.0, | |
| "step": 1003 | |
| }, | |
| { | |
| "epoch": 2.08298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.796059563749325e-05, | |
| "loss": 0.0, | |
| "step": 1004 | |
| }, | |
| { | |
| "epoch": 2.0850622406639006, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.790880937858961e-05, | |
| "loss": 0.0, | |
| "step": 1005 | |
| }, | |
| { | |
| "epoch": 2.087136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.785698554915965e-05, | |
| "loss": 0.0, | |
| "step": 1006 | |
| }, | |
| { | |
| "epoch": 2.0892116182572615, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.78051242579235e-05, | |
| "loss": 0.0, | |
| "step": 1007 | |
| }, | |
| { | |
| "epoch": 2.091286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.775322561367992e-05, | |
| "loss": 0.0, | |
| "step": 1008 | |
| }, | |
| { | |
| "epoch": 2.0933609958506225, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.770128972530602e-05, | |
| "loss": 0.0, | |
| "step": 1009 | |
| }, | |
| { | |
| "epoch": 2.095435684647303, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.764931670175701e-05, | |
| "loss": 0.0, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 2.0975103734439835, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.759730665206609e-05, | |
| "loss": 0.0, | |
| "step": 1011 | |
| }, | |
| { | |
| "epoch": 2.099585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.754525968534404e-05, | |
| "loss": 0.0, | |
| "step": 1012 | |
| }, | |
| { | |
| "epoch": 2.1016597510373445, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.749317591077915e-05, | |
| "loss": 0.0, | |
| "step": 1013 | |
| }, | |
| { | |
| "epoch": 2.103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.74410554376369e-05, | |
| "loss": 0.0, | |
| "step": 1014 | |
| }, | |
| { | |
| "epoch": 2.1058091286307055, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.7388898375259783e-05, | |
| "loss": 0.0, | |
| "step": 1015 | |
| }, | |
| { | |
| "epoch": 2.107883817427386, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.733670483306702e-05, | |
| "loss": 0.0, | |
| "step": 1016 | |
| }, | |
| { | |
| "epoch": 2.1099585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.72844749205544e-05, | |
| "loss": 0.0, | |
| "step": 1017 | |
| }, | |
| { | |
| "epoch": 2.112033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.723220874729398e-05, | |
| "loss": 0.0, | |
| "step": 1018 | |
| }, | |
| { | |
| "epoch": 2.1141078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.717990642293389e-05, | |
| "loss": 0.0, | |
| "step": 1019 | |
| }, | |
| { | |
| "epoch": 2.116182572614108, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.712756805719814e-05, | |
| "loss": 0.0, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 2.1182572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.707519375988629e-05, | |
| "loss": 0.0, | |
| "step": 1021 | |
| }, | |
| { | |
| "epoch": 2.120331950207469, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.7022783640873326e-05, | |
| "loss": 0.0, | |
| "step": 1022 | |
| }, | |
| { | |
| "epoch": 2.1224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.697033781010937e-05, | |
| "loss": 0.0, | |
| "step": 1023 | |
| }, | |
| { | |
| "epoch": 2.12448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6917856377619464e-05, | |
| "loss": 0.0, | |
| "step": 1024 | |
| }, | |
| { | |
| "epoch": 2.1265560165975104, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6865339453503336e-05, | |
| "loss": 0.0, | |
| "step": 1025 | |
| }, | |
| { | |
| "epoch": 2.128630705394191, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6812787147935155e-05, | |
| "loss": 0.0, | |
| "step": 1026 | |
| }, | |
| { | |
| "epoch": 2.1307053941908713, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.676019957116335e-05, | |
| "loss": 0.0, | |
| "step": 1027 | |
| }, | |
| { | |
| "epoch": 2.132780082987552, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6707576833510324e-05, | |
| "loss": 0.0, | |
| "step": 1028 | |
| }, | |
| { | |
| "epoch": 2.1348547717842323, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.665491904537224e-05, | |
| "loss": 0.0, | |
| "step": 1029 | |
| }, | |
| { | |
| "epoch": 2.136929460580913, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6602226317218795e-05, | |
| "loss": 0.0, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 2.1390041493775933, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6549498759593e-05, | |
| "loss": 0.0, | |
| "step": 1031 | |
| }, | |
| { | |
| "epoch": 2.141078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.649673648311092e-05, | |
| "loss": 0.0, | |
| "step": 1032 | |
| }, | |
| { | |
| "epoch": 2.1431535269709543, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.644393959846147e-05, | |
| "loss": 0.0, | |
| "step": 1033 | |
| }, | |
| { | |
| "epoch": 2.145228215767635, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.639110821640614e-05, | |
| "loss": 0.0, | |
| "step": 1034 | |
| }, | |
| { | |
| "epoch": 2.1473029045643153, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6338242447778835e-05, | |
| "loss": 0.0, | |
| "step": 1035 | |
| }, | |
| { | |
| "epoch": 2.1493775933609958, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.628534240348555e-05, | |
| "loss": 0.0, | |
| "step": 1036 | |
| }, | |
| { | |
| "epoch": 2.1514522821576763, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.623240819450421e-05, | |
| "loss": 0.0, | |
| "step": 1037 | |
| }, | |
| { | |
| "epoch": 2.1535269709543567, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6179439931884425e-05, | |
| "loss": 0.0, | |
| "step": 1038 | |
| }, | |
| { | |
| "epoch": 2.1556016597510372, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6126437726747235e-05, | |
| "loss": 0.0, | |
| "step": 1039 | |
| }, | |
| { | |
| "epoch": 2.1576763485477177, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.6073401690284886e-05, | |
| "loss": 0.0, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 2.159751037344398, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.602033193376059e-05, | |
| "loss": 0.0, | |
| "step": 1041 | |
| }, | |
| { | |
| "epoch": 2.1618257261410787, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.5967228568508326e-05, | |
| "loss": 0.0, | |
| "step": 1042 | |
| }, | |
| { | |
| "epoch": 2.163900414937759, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.591409170593253e-05, | |
| "loss": 0.0, | |
| "step": 1043 | |
| }, | |
| { | |
| "epoch": 2.1659751037344397, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.586092145750798e-05, | |
| "loss": 0.0, | |
| "step": 1044 | |
| }, | |
| { | |
| "epoch": 2.16804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.580771793477942e-05, | |
| "loss": 0.0, | |
| "step": 1045 | |
| }, | |
| { | |
| "epoch": 2.1701244813278007, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.575448124936147e-05, | |
| "loss": 0.0, | |
| "step": 1046 | |
| }, | |
| { | |
| "epoch": 2.172199170124481, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.570121151293827e-05, | |
| "loss": 0.0, | |
| "step": 1047 | |
| }, | |
| { | |
| "epoch": 2.1742738589211617, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.564790883726331e-05, | |
| "loss": 0.0, | |
| "step": 1048 | |
| }, | |
| { | |
| "epoch": 2.176348547717842, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.559457333415921e-05, | |
| "loss": 0.0, | |
| "step": 1049 | |
| }, | |
| { | |
| "epoch": 2.1784232365145226, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.554120511551742e-05, | |
| "loss": 0.0, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 2.180497925311203, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.548780429329803e-05, | |
| "loss": 0.0, | |
| "step": 1051 | |
| }, | |
| { | |
| "epoch": 2.1825726141078836, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.543437097952956e-05, | |
| "loss": 0.0, | |
| "step": 1052 | |
| }, | |
| { | |
| "epoch": 2.1846473029045645, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.538090528630865e-05, | |
| "loss": 0.0, | |
| "step": 1053 | |
| }, | |
| { | |
| "epoch": 2.186721991701245, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.5327407325799894e-05, | |
| "loss": 0.0, | |
| "step": 1054 | |
| }, | |
| { | |
| "epoch": 2.1887966804979255, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.527387721023558e-05, | |
| "loss": 0.0, | |
| "step": 1055 | |
| }, | |
| { | |
| "epoch": 2.190871369294606, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.522031505191544e-05, | |
| "loss": 0.0, | |
| "step": 1056 | |
| }, | |
| { | |
| "epoch": 2.1929460580912865, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.516672096320643e-05, | |
| "loss": 0.0, | |
| "step": 1057 | |
| }, | |
| { | |
| "epoch": 2.195020746887967, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.511309505654252e-05, | |
| "loss": 0.0, | |
| "step": 1058 | |
| }, | |
| { | |
| "epoch": 2.1970954356846475, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.5059437444424375e-05, | |
| "loss": 0.0, | |
| "step": 1059 | |
| }, | |
| { | |
| "epoch": 2.199170124481328, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.5005748239419235e-05, | |
| "loss": 0.0, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 2.2012448132780085, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4952027554160564e-05, | |
| "loss": 0.0, | |
| "step": 1061 | |
| }, | |
| { | |
| "epoch": 2.203319502074689, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4898275501347906e-05, | |
| "loss": 0.0, | |
| "step": 1062 | |
| }, | |
| { | |
| "epoch": 2.2053941908713695, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.484449219374661e-05, | |
| "loss": 0.0, | |
| "step": 1063 | |
| }, | |
| { | |
| "epoch": 2.20746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4790677744187556e-05, | |
| "loss": 0.0, | |
| "step": 1064 | |
| }, | |
| { | |
| "epoch": 2.2095435684647304, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4736832265566996e-05, | |
| "loss": 0.0, | |
| "step": 1065 | |
| }, | |
| { | |
| "epoch": 2.211618257261411, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4682955870846264e-05, | |
| "loss": 0.0, | |
| "step": 1066 | |
| }, | |
| { | |
| "epoch": 2.2136929460580914, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4629048673051546e-05, | |
| "loss": 0.0, | |
| "step": 1067 | |
| }, | |
| { | |
| "epoch": 2.215767634854772, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.457511078527365e-05, | |
| "loss": 0.0, | |
| "step": 1068 | |
| }, | |
| { | |
| "epoch": 2.2178423236514524, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4521142320667785e-05, | |
| "loss": 0.0, | |
| "step": 1069 | |
| }, | |
| { | |
| "epoch": 2.219917012448133, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.446714339245327e-05, | |
| "loss": 0.0, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 2.2219917012448134, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.441311411391339e-05, | |
| "loss": 0.0, | |
| "step": 1071 | |
| }, | |
| { | |
| "epoch": 2.224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.435905459839504e-05, | |
| "loss": 0.0, | |
| "step": 1072 | |
| }, | |
| { | |
| "epoch": 2.2261410788381744, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4304964959308585e-05, | |
| "loss": 0.0, | |
| "step": 1073 | |
| }, | |
| { | |
| "epoch": 2.228215767634855, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.425084531012758e-05, | |
| "loss": 0.0, | |
| "step": 1074 | |
| }, | |
| { | |
| "epoch": 2.2302904564315353, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4196695764388534e-05, | |
| "loss": 0.0, | |
| "step": 1075 | |
| }, | |
| { | |
| "epoch": 2.232365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.414251643569067e-05, | |
| "loss": 0.0, | |
| "step": 1076 | |
| }, | |
| { | |
| "epoch": 2.2344398340248963, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.408830743769571e-05, | |
| "loss": 0.0, | |
| "step": 1077 | |
| }, | |
| { | |
| "epoch": 2.236514522821577, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4034068884127594e-05, | |
| "loss": 0.0, | |
| "step": 1078 | |
| }, | |
| { | |
| "epoch": 2.2385892116182573, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.3979800888772286e-05, | |
| "loss": 0.0, | |
| "step": 1079 | |
| }, | |
| { | |
| "epoch": 2.240663900414938, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.39255035654775e-05, | |
| "loss": 0.0, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 2.2427385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.387117702815248e-05, | |
| "loss": 0.0, | |
| "step": 1081 | |
| }, | |
| { | |
| "epoch": 2.2448132780082988, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.381682139076779e-05, | |
| "loss": 0.0, | |
| "step": 1082 | |
| }, | |
| { | |
| "epoch": 2.2468879668049793, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.376243676735498e-05, | |
| "loss": 0.0, | |
| "step": 1083 | |
| }, | |
| { | |
| "epoch": 2.2489626556016598, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.370802327200646e-05, | |
| "loss": 0.0, | |
| "step": 1084 | |
| }, | |
| { | |
| "epoch": 2.2510373443983402, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.36535810188752e-05, | |
| "loss": 0.0, | |
| "step": 1085 | |
| }, | |
| { | |
| "epoch": 2.2531120331950207, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.359911012217448e-05, | |
| "loss": 0.0, | |
| "step": 1086 | |
| }, | |
| { | |
| "epoch": 2.2551867219917012, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.3544610696177683e-05, | |
| "loss": 0.0, | |
| "step": 1087 | |
| }, | |
| { | |
| "epoch": 2.2572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.349008285521807e-05, | |
| "loss": 0.0, | |
| "step": 1088 | |
| }, | |
| { | |
| "epoch": 2.259336099585062, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.343552671368846e-05, | |
| "loss": 0.0, | |
| "step": 1089 | |
| }, | |
| { | |
| "epoch": 2.2614107883817427, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.338094238604108e-05, | |
| "loss": 0.0, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 2.263485477178423, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.332632998678728e-05, | |
| "loss": 0.0, | |
| "step": 1091 | |
| }, | |
| { | |
| "epoch": 2.2655601659751037, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.3271689630497286e-05, | |
| "loss": 0.0, | |
| "step": 1092 | |
| }, | |
| { | |
| "epoch": 2.267634854771784, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.3217021431800015e-05, | |
| "loss": 0.0, | |
| "step": 1093 | |
| }, | |
| { | |
| "epoch": 2.2697095435684647, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.316232550538275e-05, | |
| "loss": 0.0, | |
| "step": 1094 | |
| }, | |
| { | |
| "epoch": 2.271784232365145, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.310760196599096e-05, | |
| "loss": 0.0, | |
| "step": 1095 | |
| }, | |
| { | |
| "epoch": 2.2738589211618256, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.3052850928428055e-05, | |
| "loss": 0.0, | |
| "step": 1096 | |
| }, | |
| { | |
| "epoch": 2.275933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.299807250755511e-05, | |
| "loss": 0.0, | |
| "step": 1097 | |
| }, | |
| { | |
| "epoch": 2.2780082987551866, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.294326681829066e-05, | |
| "loss": 0.0, | |
| "step": 1098 | |
| }, | |
| { | |
| "epoch": 2.280082987551867, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.288843397561045e-05, | |
| "loss": 0.0, | |
| "step": 1099 | |
| }, | |
| { | |
| "epoch": 2.2821576763485476, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.283357409454716e-05, | |
| "loss": 0.0, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 2.284232365145228, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.277868729019027e-05, | |
| "loss": 0.0, | |
| "step": 1101 | |
| }, | |
| { | |
| "epoch": 2.2863070539419086, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.2723773677685634e-05, | |
| "loss": 0.0, | |
| "step": 1102 | |
| }, | |
| { | |
| "epoch": 2.288381742738589, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.266883337223542e-05, | |
| "loss": 0.0, | |
| "step": 1103 | |
| }, | |
| { | |
| "epoch": 2.2904564315352696, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.261386648909778e-05, | |
| "loss": 0.0, | |
| "step": 1104 | |
| }, | |
| { | |
| "epoch": 2.29253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.255887314358663e-05, | |
| "loss": 0.0, | |
| "step": 1105 | |
| }, | |
| { | |
| "epoch": 2.2946058091286305, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.250385345107134e-05, | |
| "loss": 0.0, | |
| "step": 1106 | |
| }, | |
| { | |
| "epoch": 2.296680497925311, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.244880752697667e-05, | |
| "loss": 0.0, | |
| "step": 1107 | |
| }, | |
| { | |
| "epoch": 2.2987551867219915, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.239373548678229e-05, | |
| "loss": 0.0, | |
| "step": 1108 | |
| }, | |
| { | |
| "epoch": 2.300829875518672, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.233863744602273e-05, | |
| "loss": 0.0, | |
| "step": 1109 | |
| }, | |
| { | |
| "epoch": 2.3029045643153525, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.228351352028707e-05, | |
| "loss": 0.0, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 2.304979253112033, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.222836382521864e-05, | |
| "loss": 0.0, | |
| "step": 1111 | |
| }, | |
| { | |
| "epoch": 2.3070539419087135, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.217318847651485e-05, | |
| "loss": 0.0, | |
| "step": 1112 | |
| }, | |
| { | |
| "epoch": 2.309128630705394, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.211798758992698e-05, | |
| "loss": 0.0, | |
| "step": 1113 | |
| }, | |
| { | |
| "epoch": 2.3112033195020745, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.206276128125981e-05, | |
| "loss": 0.0, | |
| "step": 1114 | |
| }, | |
| { | |
| "epoch": 2.313278008298755, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.2007509666371524e-05, | |
| "loss": 0.0, | |
| "step": 1115 | |
| }, | |
| { | |
| "epoch": 2.3153526970954355, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.195223286117332e-05, | |
| "loss": 0.0, | |
| "step": 1116 | |
| }, | |
| { | |
| "epoch": 2.317427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.1896930981629305e-05, | |
| "loss": 0.0, | |
| "step": 1117 | |
| }, | |
| { | |
| "epoch": 2.3195020746887964, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.1841604143756175e-05, | |
| "loss": 0.0, | |
| "step": 1118 | |
| }, | |
| { | |
| "epoch": 2.3215767634854774, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.1786252463622946e-05, | |
| "loss": 0.0, | |
| "step": 1119 | |
| }, | |
| { | |
| "epoch": 2.323651452282158, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.173087605735079e-05, | |
| "loss": 0.0, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 2.3257261410788383, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.1675475041112765e-05, | |
| "loss": 0.0, | |
| "step": 1121 | |
| }, | |
| { | |
| "epoch": 2.327800829875519, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.162004953113351e-05, | |
| "loss": 0.0, | |
| "step": 1122 | |
| }, | |
| { | |
| "epoch": 2.3298755186721993, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.156459964368907e-05, | |
| "loss": 0.0, | |
| "step": 1123 | |
| }, | |
| { | |
| "epoch": 2.33195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.150912549510665e-05, | |
| "loss": 0.0, | |
| "step": 1124 | |
| }, | |
| { | |
| "epoch": 2.3340248962655603, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.145362720176433e-05, | |
| "loss": 0.0, | |
| "step": 1125 | |
| }, | |
| { | |
| "epoch": 2.336099585062241, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.139810488009085e-05, | |
| "loss": 0.0, | |
| "step": 1126 | |
| }, | |
| { | |
| "epoch": 2.3381742738589213, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.134255864656536e-05, | |
| "loss": 0.0, | |
| "step": 1127 | |
| }, | |
| { | |
| "epoch": 2.340248962655602, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.128698861771716e-05, | |
| "loss": 0.0, | |
| "step": 1128 | |
| }, | |
| { | |
| "epoch": 2.3423236514522823, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.1231394910125486e-05, | |
| "loss": 0.0, | |
| "step": 1129 | |
| }, | |
| { | |
| "epoch": 2.3443983402489628, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.117577764041926e-05, | |
| "loss": 0.0, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 2.3464730290456433, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.112013692527679e-05, | |
| "loss": 0.0, | |
| "step": 1131 | |
| }, | |
| { | |
| "epoch": 2.3485477178423237, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.106447288142564e-05, | |
| "loss": 0.0, | |
| "step": 1132 | |
| }, | |
| { | |
| "epoch": 2.3506224066390042, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.100878562564222e-05, | |
| "loss": 0.0, | |
| "step": 1133 | |
| }, | |
| { | |
| "epoch": 2.3526970954356847, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.095307527475174e-05, | |
| "loss": 0.0, | |
| "step": 1134 | |
| }, | |
| { | |
| "epoch": 2.354771784232365, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.089734194562779e-05, | |
| "loss": 0.0, | |
| "step": 1135 | |
| }, | |
| { | |
| "epoch": 2.3568464730290457, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.084158575519217e-05, | |
| "loss": 0.0, | |
| "step": 1136 | |
| }, | |
| { | |
| "epoch": 2.358921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.078580682041468e-05, | |
| "loss": 0.0, | |
| "step": 1137 | |
| }, | |
| { | |
| "epoch": 2.3609958506224067, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.07300052583128e-05, | |
| "loss": 0.0, | |
| "step": 1138 | |
| }, | |
| { | |
| "epoch": 2.363070539419087, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.0674181185951486e-05, | |
| "loss": 0.0, | |
| "step": 1139 | |
| }, | |
| { | |
| "epoch": 2.3651452282157677, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.061833472044292e-05, | |
| "loss": 0.0, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 2.367219917012448, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.056246597894628e-05, | |
| "loss": 0.0, | |
| "step": 1141 | |
| }, | |
| { | |
| "epoch": 2.3692946058091287, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.050657507866746e-05, | |
| "loss": 0.0, | |
| "step": 1142 | |
| }, | |
| { | |
| "epoch": 2.371369294605809, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.045066213685883e-05, | |
| "loss": 0.0, | |
| "step": 1143 | |
| }, | |
| { | |
| "epoch": 2.3734439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.039472727081902e-05, | |
| "loss": 0.0, | |
| "step": 1144 | |
| }, | |
| { | |
| "epoch": 2.37551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.0338770597892624e-05, | |
| "loss": 0.0, | |
| "step": 1145 | |
| }, | |
| { | |
| "epoch": 2.3775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.028279223547004e-05, | |
| "loss": 0.0, | |
| "step": 1146 | |
| }, | |
| { | |
| "epoch": 2.379668049792531, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.02267923009871e-05, | |
| "loss": 0.0, | |
| "step": 1147 | |
| }, | |
| { | |
| "epoch": 2.3817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.0170770911924963e-05, | |
| "loss": 0.0, | |
| "step": 1148 | |
| }, | |
| { | |
| "epoch": 2.383817427385892, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.0114728185809735e-05, | |
| "loss": 0.0, | |
| "step": 1149 | |
| }, | |
| { | |
| "epoch": 2.3858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.00586642402123e-05, | |
| "loss": 0.0, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 2.387966804979253, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.000257919274809e-05, | |
| "loss": 0.0, | |
| "step": 1151 | |
| }, | |
| { | |
| "epoch": 2.3900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.994647316107678e-05, | |
| "loss": 0.0, | |
| "step": 1152 | |
| }, | |
| { | |
| "epoch": 2.392116182572614, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.989034626290205e-05, | |
| "loss": 0.0, | |
| "step": 1153 | |
| }, | |
| { | |
| "epoch": 2.3941908713692945, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.9834198615971405e-05, | |
| "loss": 0.0, | |
| "step": 1154 | |
| }, | |
| { | |
| "epoch": 2.396265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.977803033807584e-05, | |
| "loss": 0.0, | |
| "step": 1155 | |
| }, | |
| { | |
| "epoch": 2.3983402489626555, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.972184154704962e-05, | |
| "loss": 0.0, | |
| "step": 1156 | |
| }, | |
| { | |
| "epoch": 2.400414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.96656323607701e-05, | |
| "loss": 0.0, | |
| "step": 1157 | |
| }, | |
| { | |
| "epoch": 2.4024896265560165, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.9609402897157354e-05, | |
| "loss": 0.0, | |
| "step": 1158 | |
| }, | |
| { | |
| "epoch": 2.404564315352697, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.955315327417405e-05, | |
| "loss": 0.0, | |
| "step": 1159 | |
| }, | |
| { | |
| "epoch": 2.4066390041493775, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.949688360982512e-05, | |
| "loss": 0.0, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 2.408713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.944059402215754e-05, | |
| "loss": 0.0, | |
| "step": 1161 | |
| }, | |
| { | |
| "epoch": 2.4107883817427385, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.93842846292601e-05, | |
| "loss": 0.0, | |
| "step": 1162 | |
| }, | |
| { | |
| "epoch": 2.412863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.932795554926312e-05, | |
| "loss": 0.0, | |
| "step": 1163 | |
| }, | |
| { | |
| "epoch": 2.4149377593360994, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.927160690033823e-05, | |
| "loss": 0.0, | |
| "step": 1164 | |
| }, | |
| { | |
| "epoch": 2.41701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.921523880069812e-05, | |
| "loss": 0.0, | |
| "step": 1165 | |
| }, | |
| { | |
| "epoch": 2.4190871369294604, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.915885136859627e-05, | |
| "loss": 0.0, | |
| "step": 1166 | |
| }, | |
| { | |
| "epoch": 2.421161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.9102444722326726e-05, | |
| "loss": 0.0, | |
| "step": 1167 | |
| }, | |
| { | |
| "epoch": 2.4232365145228214, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.904601898022385e-05, | |
| "loss": 0.0, | |
| "step": 1168 | |
| }, | |
| { | |
| "epoch": 2.425311203319502, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.898957426066204e-05, | |
| "loss": 0.0, | |
| "step": 1169 | |
| }, | |
| { | |
| "epoch": 2.4273858921161824, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.893311068205554e-05, | |
| "loss": 0.0, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 2.429460580912863, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.887662836285814e-05, | |
| "loss": 0.0, | |
| "step": 1171 | |
| }, | |
| { | |
| "epoch": 2.431535269709544, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.882012742156293e-05, | |
| "loss": 0.0, | |
| "step": 1172 | |
| }, | |
| { | |
| "epoch": 2.4336099585062243, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.876360797670209e-05, | |
| "loss": 0.0, | |
| "step": 1173 | |
| }, | |
| { | |
| "epoch": 2.435684647302905, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.870707014684663e-05, | |
| "loss": 0.0, | |
| "step": 1174 | |
| }, | |
| { | |
| "epoch": 2.4377593360995853, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.8650514050606095e-05, | |
| "loss": 0.0, | |
| "step": 1175 | |
| }, | |
| { | |
| "epoch": 2.4398340248962658, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.8593939806628374e-05, | |
| "loss": 0.0, | |
| "step": 1176 | |
| }, | |
| { | |
| "epoch": 2.4419087136929463, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.853734753359942e-05, | |
| "loss": 0.0, | |
| "step": 1177 | |
| }, | |
| { | |
| "epoch": 2.4439834024896268, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.8480737350243e-05, | |
| "loss": 0.0, | |
| "step": 1178 | |
| }, | |
| { | |
| "epoch": 2.4460580912863072, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.842410937532047e-05, | |
| "loss": 0.0, | |
| "step": 1179 | |
| }, | |
| { | |
| "epoch": 2.4481327800829877, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.836746372763051e-05, | |
| "loss": 0.0, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 2.4502074688796682, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.831080052600887e-05, | |
| "loss": 0.0, | |
| "step": 1181 | |
| }, | |
| { | |
| "epoch": 2.4522821576763487, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.825411988932812e-05, | |
| "loss": 0.0, | |
| "step": 1182 | |
| }, | |
| { | |
| "epoch": 2.454356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.8197421936497405e-05, | |
| "loss": 0.0, | |
| "step": 1183 | |
| }, | |
| { | |
| "epoch": 2.4564315352697097, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.814070678646223e-05, | |
| "loss": 0.0, | |
| "step": 1184 | |
| }, | |
| { | |
| "epoch": 2.45850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.808397455820414e-05, | |
| "loss": 0.0, | |
| "step": 1185 | |
| }, | |
| { | |
| "epoch": 2.4605809128630707, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.802722537074051e-05, | |
| "loss": 0.0, | |
| "step": 1186 | |
| }, | |
| { | |
| "epoch": 2.462655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.797045934312432e-05, | |
| "loss": 0.0, | |
| "step": 1187 | |
| }, | |
| { | |
| "epoch": 2.4647302904564317, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.7913676594443856e-05, | |
| "loss": 0.0, | |
| "step": 1188 | |
| }, | |
| { | |
| "epoch": 2.466804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.785687724382248e-05, | |
| "loss": 0.0, | |
| "step": 1189 | |
| }, | |
| { | |
| "epoch": 2.4688796680497926, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.780006141041842e-05, | |
| "loss": 0.0, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 2.470954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.7743229213424425e-05, | |
| "loss": 0.0, | |
| "step": 1191 | |
| }, | |
| { | |
| "epoch": 2.4730290456431536, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.768638077206762e-05, | |
| "loss": 0.0, | |
| "step": 1192 | |
| }, | |
| { | |
| "epoch": 2.475103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.76295162056092e-05, | |
| "loss": 0.0, | |
| "step": 1193 | |
| }, | |
| { | |
| "epoch": 2.4771784232365146, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.757263563334415e-05, | |
| "loss": 0.0, | |
| "step": 1194 | |
| }, | |
| { | |
| "epoch": 2.479253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.751573917460109e-05, | |
| "loss": 0.0, | |
| "step": 1195 | |
| }, | |
| { | |
| "epoch": 2.4813278008298756, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.745882694874194e-05, | |
| "loss": 0.0, | |
| "step": 1196 | |
| }, | |
| { | |
| "epoch": 2.483402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.740189907516169e-05, | |
| "loss": 0.0, | |
| "step": 1197 | |
| }, | |
| { | |
| "epoch": 2.4854771784232366, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.7344955673288165e-05, | |
| "loss": 0.0, | |
| "step": 1198 | |
| }, | |
| { | |
| "epoch": 2.487551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.728799686258176e-05, | |
| "loss": 0.0, | |
| "step": 1199 | |
| }, | |
| { | |
| "epoch": 2.4896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.723102276253525e-05, | |
| "loss": 0.0, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 2.491701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.717403349267338e-05, | |
| "loss": 0.0, | |
| "step": 1201 | |
| }, | |
| { | |
| "epoch": 2.4937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.71170291725528e-05, | |
| "loss": 0.0, | |
| "step": 1202 | |
| }, | |
| { | |
| "epoch": 2.495850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.706000992176172e-05, | |
| "loss": 0.0, | |
| "step": 1203 | |
| }, | |
| { | |
| "epoch": 2.4979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.7002975859919675e-05, | |
| "loss": 0.0, | |
| "step": 1204 | |
| }, | |
| { | |
| "epoch": 2.5, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.694592710667723e-05, | |
| "loss": 0.0, | |
| "step": 1205 | |
| }, | |
| { | |
| "epoch": 2.5020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.6888863781715814e-05, | |
| "loss": 0.0, | |
| "step": 1206 | |
| }, | |
| { | |
| "epoch": 2.504149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.683178600474741e-05, | |
| "loss": 0.0, | |
| "step": 1207 | |
| }, | |
| { | |
| "epoch": 2.5062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.677469389551435e-05, | |
| "loss": 0.0, | |
| "step": 1208 | |
| }, | |
| { | |
| "epoch": 2.508298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.671758757378898e-05, | |
| "loss": 0.0, | |
| "step": 1209 | |
| }, | |
| { | |
| "epoch": 2.5103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.6660467159373506e-05, | |
| "loss": 0.0, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 2.512448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.660333277209965e-05, | |
| "loss": 0.0, | |
| "step": 1211 | |
| }, | |
| { | |
| "epoch": 2.5145228215767634, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.6546184531828516e-05, | |
| "loss": 0.0, | |
| "step": 1212 | |
| }, | |
| { | |
| "epoch": 2.516597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.648902255845021e-05, | |
| "loss": 0.0, | |
| "step": 1213 | |
| }, | |
| { | |
| "epoch": 2.5186721991701244, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.64318469718837e-05, | |
| "loss": 0.0, | |
| "step": 1214 | |
| }, | |
| { | |
| "epoch": 2.520746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.637465789207645e-05, | |
| "loss": 0.0, | |
| "step": 1215 | |
| }, | |
| { | |
| "epoch": 2.5228215767634854, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.6317455439004276e-05, | |
| "loss": 0.0, | |
| "step": 1216 | |
| }, | |
| { | |
| "epoch": 2.524896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.626023973267105e-05, | |
| "loss": 0.0, | |
| "step": 1217 | |
| }, | |
| { | |
| "epoch": 2.5269709543568464, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.620301089310843e-05, | |
| "loss": 0.0, | |
| "step": 1218 | |
| }, | |
| { | |
| "epoch": 2.529045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.614576904037563e-05, | |
| "loss": 0.0, | |
| "step": 1219 | |
| }, | |
| { | |
| "epoch": 2.5311203319502074, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.608851429455916e-05, | |
| "loss": 0.0, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 2.533195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.603124677577261e-05, | |
| "loss": 0.0, | |
| "step": 1221 | |
| }, | |
| { | |
| "epoch": 2.5352697095435683, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.597396660415631e-05, | |
| "loss": 0.0, | |
| "step": 1222 | |
| }, | |
| { | |
| "epoch": 2.537344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.5916673899877185e-05, | |
| "loss": 0.0, | |
| "step": 1223 | |
| }, | |
| { | |
| "epoch": 2.5394190871369293, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.585936878312842e-05, | |
| "loss": 0.0, | |
| "step": 1224 | |
| }, | |
| { | |
| "epoch": 2.54149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.580205137412926e-05, | |
| "loss": 0.0, | |
| "step": 1225 | |
| }, | |
| { | |
| "epoch": 2.5435684647302903, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.574472179312473e-05, | |
| "loss": 0.0, | |
| "step": 1226 | |
| }, | |
| { | |
| "epoch": 2.545643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.568738016038539e-05, | |
| "loss": 0.0, | |
| "step": 1227 | |
| }, | |
| { | |
| "epoch": 2.5477178423236513, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.563002659620708e-05, | |
| "loss": 0.0, | |
| "step": 1228 | |
| }, | |
| { | |
| "epoch": 2.5497925311203318, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.557266122091067e-05, | |
| "loss": 0.0, | |
| "step": 1229 | |
| }, | |
| { | |
| "epoch": 2.5518672199170123, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.551528415484181e-05, | |
| "loss": 0.0, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 2.5539419087136928, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.5457895518370696e-05, | |
| "loss": 0.0, | |
| "step": 1231 | |
| }, | |
| { | |
| "epoch": 2.5560165975103732, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.5400495431891766e-05, | |
| "loss": 0.0, | |
| "step": 1232 | |
| }, | |
| { | |
| "epoch": 2.5580912863070537, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.5343084015823496e-05, | |
| "loss": 0.0, | |
| "step": 1233 | |
| }, | |
| { | |
| "epoch": 2.5601659751037342, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.5285661390608125e-05, | |
| "loss": 0.0, | |
| "step": 1234 | |
| }, | |
| { | |
| "epoch": 2.5622406639004147, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.52282276767114e-05, | |
| "loss": 0.0, | |
| "step": 1235 | |
| }, | |
| { | |
| "epoch": 2.564315352697095, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.5170782994622355e-05, | |
| "loss": 0.0, | |
| "step": 1236 | |
| }, | |
| { | |
| "epoch": 2.5663900414937757, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.511332746485301e-05, | |
| "loss": 0.0, | |
| "step": 1237 | |
| }, | |
| { | |
| "epoch": 2.568464730290456, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.505586120793814e-05, | |
| "loss": 0.0, | |
| "step": 1238 | |
| }, | |
| { | |
| "epoch": 2.5705394190871367, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.499838434443506e-05, | |
| "loss": 0.0, | |
| "step": 1239 | |
| }, | |
| { | |
| "epoch": 2.572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.4940896994923285e-05, | |
| "loss": 0.0, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 2.5746887966804977, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.488339928000436e-05, | |
| "loss": 0.0, | |
| "step": 1241 | |
| }, | |
| { | |
| "epoch": 2.576763485477178, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.482589132030159e-05, | |
| "loss": 0.0, | |
| "step": 1242 | |
| }, | |
| { | |
| "epoch": 2.578838174273859, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.476837323645973e-05, | |
| "loss": 0.0, | |
| "step": 1243 | |
| }, | |
| { | |
| "epoch": 2.5809128630705396, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.47108451491448e-05, | |
| "loss": 0.0, | |
| "step": 1244 | |
| }, | |
| { | |
| "epoch": 2.58298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.465330717904381e-05, | |
| "loss": 0.0, | |
| "step": 1245 | |
| }, | |
| { | |
| "epoch": 2.5850622406639006, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.4595759446864495e-05, | |
| "loss": 0.0, | |
| "step": 1246 | |
| }, | |
| { | |
| "epoch": 2.587136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.453820207333507e-05, | |
| "loss": 0.0, | |
| "step": 1247 | |
| }, | |
| { | |
| "epoch": 2.5892116182572615, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.448063517920396e-05, | |
| "loss": 0.0, | |
| "step": 1248 | |
| }, | |
| { | |
| "epoch": 2.591286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.44230588852396e-05, | |
| "loss": 0.0, | |
| "step": 1249 | |
| }, | |
| { | |
| "epoch": 2.5933609958506225, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.4365473312230114e-05, | |
| "loss": 0.0, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 2.595435684647303, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.430787858098311e-05, | |
| "loss": 0.0, | |
| "step": 1251 | |
| }, | |
| { | |
| "epoch": 2.5975103734439835, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.425027481232538e-05, | |
| "loss": 0.0, | |
| "step": 1252 | |
| }, | |
| { | |
| "epoch": 2.599585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.4192662127102726e-05, | |
| "loss": 0.0, | |
| "step": 1253 | |
| }, | |
| { | |
| "epoch": 2.6016597510373445, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.413504064617961e-05, | |
| "loss": 0.0, | |
| "step": 1254 | |
| }, | |
| { | |
| "epoch": 2.603734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.4077410490438954e-05, | |
| "loss": 0.0, | |
| "step": 1255 | |
| }, | |
| { | |
| "epoch": 2.6058091286307055, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.4019771780781915e-05, | |
| "loss": 0.0, | |
| "step": 1256 | |
| }, | |
| { | |
| "epoch": 2.607883817427386, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.396212463812754e-05, | |
| "loss": 0.0, | |
| "step": 1257 | |
| }, | |
| { | |
| "epoch": 2.6099585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.390446918341262e-05, | |
| "loss": 0.0, | |
| "step": 1258 | |
| }, | |
| { | |
| "epoch": 2.612033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.384680553759134e-05, | |
| "loss": 0.0, | |
| "step": 1259 | |
| }, | |
| { | |
| "epoch": 2.6141078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.378913382163509e-05, | |
| "loss": 0.0, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 2.616182572614108, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.37314541565322e-05, | |
| "loss": 0.0, | |
| "step": 1261 | |
| }, | |
| { | |
| "epoch": 2.6182572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.367376666328765e-05, | |
| "loss": 0.0, | |
| "step": 1262 | |
| }, | |
| { | |
| "epoch": 2.620331950207469, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.361607146292286e-05, | |
| "loss": 0.0, | |
| "step": 1263 | |
| }, | |
| { | |
| "epoch": 2.6224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.355836867647543e-05, | |
| "loss": 0.0, | |
| "step": 1264 | |
| }, | |
| { | |
| "epoch": 2.62448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.3500658424998836e-05, | |
| "loss": 0.0, | |
| "step": 1265 | |
| }, | |
| { | |
| "epoch": 2.6265560165975104, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.344294082956225e-05, | |
| "loss": 0.0, | |
| "step": 1266 | |
| }, | |
| { | |
| "epoch": 2.628630705394191, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.3385216011250254e-05, | |
| "loss": 0.0, | |
| "step": 1267 | |
| }, | |
| { | |
| "epoch": 2.6307053941908713, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.332748409116255e-05, | |
| "loss": 0.0, | |
| "step": 1268 | |
| }, | |
| { | |
| "epoch": 2.632780082987552, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.326974519041375e-05, | |
| "loss": 0.0, | |
| "step": 1269 | |
| }, | |
| { | |
| "epoch": 2.6348547717842323, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.321199943013315e-05, | |
| "loss": 0.0, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 2.636929460580913, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.3154246931464365e-05, | |
| "loss": 0.0, | |
| "step": 1271 | |
| }, | |
| { | |
| "epoch": 2.6390041493775933, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.309648781556522e-05, | |
| "loss": 0.0, | |
| "step": 1272 | |
| }, | |
| { | |
| "epoch": 2.641078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.303872220360737e-05, | |
| "loss": 0.0, | |
| "step": 1273 | |
| }, | |
| { | |
| "epoch": 2.6431535269709543, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.2980950216776105e-05, | |
| "loss": 0.0, | |
| "step": 1274 | |
| }, | |
| { | |
| "epoch": 2.645228215767635, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.292317197627011e-05, | |
| "loss": 0.0, | |
| "step": 1275 | |
| }, | |
| { | |
| "epoch": 2.6473029045643153, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.286538760330119e-05, | |
| "loss": 0.0, | |
| "step": 1276 | |
| }, | |
| { | |
| "epoch": 2.6493775933609958, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.280759721909399e-05, | |
| "loss": 0.0, | |
| "step": 1277 | |
| }, | |
| { | |
| "epoch": 2.6514522821576763, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.274980094488579e-05, | |
| "loss": 0.0, | |
| "step": 1278 | |
| }, | |
| { | |
| "epoch": 2.6535269709543567, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.2691998901926196e-05, | |
| "loss": 0.0, | |
| "step": 1279 | |
| }, | |
| { | |
| "epoch": 2.6556016597510372, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.263419121147696e-05, | |
| "loss": 0.0, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 2.6576763485477177, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.257637799481165e-05, | |
| "loss": 0.0, | |
| "step": 1281 | |
| }, | |
| { | |
| "epoch": 2.659751037344398, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.2518559373215426e-05, | |
| "loss": 0.0, | |
| "step": 1282 | |
| }, | |
| { | |
| "epoch": 2.6618257261410787, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.2460735467984816e-05, | |
| "loss": 0.0, | |
| "step": 1283 | |
| }, | |
| { | |
| "epoch": 2.663900414937759, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.240290640042739e-05, | |
| "loss": 0.0, | |
| "step": 1284 | |
| }, | |
| { | |
| "epoch": 2.6659751037344397, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.234507229186158e-05, | |
| "loss": 0.0, | |
| "step": 1285 | |
| }, | |
| { | |
| "epoch": 2.66804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.2287233263616387e-05, | |
| "loss": 0.0, | |
| "step": 1286 | |
| }, | |
| { | |
| "epoch": 2.6701244813278007, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.222938943703113e-05, | |
| "loss": 0.0, | |
| "step": 1287 | |
| }, | |
| { | |
| "epoch": 2.6721991701244816, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.217154093345519e-05, | |
| "loss": 0.0, | |
| "step": 1288 | |
| }, | |
| { | |
| "epoch": 2.674273858921162, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.211368787424778e-05, | |
| "loss": 0.0, | |
| "step": 1289 | |
| }, | |
| { | |
| "epoch": 2.6763485477178426, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.205583038077762e-05, | |
| "loss": 0.0, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 2.678423236514523, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.199796857442279e-05, | |
| "loss": 0.0, | |
| "step": 1291 | |
| }, | |
| { | |
| "epoch": 2.6804979253112036, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1940102576570415e-05, | |
| "loss": 0.0, | |
| "step": 1292 | |
| }, | |
| { | |
| "epoch": 2.682572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.188223250861635e-05, | |
| "loss": 0.0, | |
| "step": 1293 | |
| }, | |
| { | |
| "epoch": 2.6846473029045645, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1824358491965054e-05, | |
| "loss": 0.0, | |
| "step": 1294 | |
| }, | |
| { | |
| "epoch": 2.686721991701245, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.176648064802925e-05, | |
| "loss": 0.0, | |
| "step": 1295 | |
| }, | |
| { | |
| "epoch": 2.6887966804979255, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.170859909822968e-05, | |
| "loss": 0.0, | |
| "step": 1296 | |
| }, | |
| { | |
| "epoch": 2.690871369294606, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.165071396399487e-05, | |
| "loss": 0.0, | |
| "step": 1297 | |
| }, | |
| { | |
| "epoch": 2.6929460580912865, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.159282536676086e-05, | |
| "loss": 0.0, | |
| "step": 1298 | |
| }, | |
| { | |
| "epoch": 2.695020746887967, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.153493342797098e-05, | |
| "loss": 0.0, | |
| "step": 1299 | |
| }, | |
| { | |
| "epoch": 2.6970954356846475, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.147703826907553e-05, | |
| "loss": 0.0, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 2.699170124481328, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.141914001153157e-05, | |
| "loss": 0.0, | |
| "step": 1301 | |
| }, | |
| { | |
| "epoch": 2.7012448132780085, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1361238776802705e-05, | |
| "loss": 0.0, | |
| "step": 1302 | |
| }, | |
| { | |
| "epoch": 2.703319502074689, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.130333468635875e-05, | |
| "loss": 0.0, | |
| "step": 1303 | |
| }, | |
| { | |
| "epoch": 2.7053941908713695, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1245427861675494e-05, | |
| "loss": 0.0, | |
| "step": 1304 | |
| }, | |
| { | |
| "epoch": 2.70746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.118751842423451e-05, | |
| "loss": 0.0, | |
| "step": 1305 | |
| }, | |
| { | |
| "epoch": 2.7095435684647304, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1129606495522815e-05, | |
| "loss": 0.0, | |
| "step": 1306 | |
| }, | |
| { | |
| "epoch": 2.711618257261411, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1071692197032664e-05, | |
| "loss": 0.0, | |
| "step": 1307 | |
| }, | |
| { | |
| "epoch": 2.7136929460580914, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1013775650261287e-05, | |
| "loss": 0.0, | |
| "step": 1308 | |
| }, | |
| { | |
| "epoch": 2.715767634854772, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.095585697671063e-05, | |
| "loss": 0.0, | |
| "step": 1309 | |
| }, | |
| { | |
| "epoch": 2.7178423236514524, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.089793629788708e-05, | |
| "loss": 0.0, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 2.719917012448133, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.0840013735301276e-05, | |
| "loss": 0.0, | |
| "step": 1311 | |
| }, | |
| { | |
| "epoch": 2.7219917012448134, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.078208941046777e-05, | |
| "loss": 0.0, | |
| "step": 1312 | |
| }, | |
| { | |
| "epoch": 2.724066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.072416344490483e-05, | |
| "loss": 0.0, | |
| "step": 1313 | |
| }, | |
| { | |
| "epoch": 2.7261410788381744, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.066623596013415e-05, | |
| "loss": 0.0, | |
| "step": 1314 | |
| }, | |
| { | |
| "epoch": 2.728215767634855, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.0608307077680614e-05, | |
| "loss": 0.0, | |
| "step": 1315 | |
| }, | |
| { | |
| "epoch": 2.7302904564315353, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.055037691907206e-05, | |
| "loss": 0.0, | |
| "step": 1316 | |
| }, | |
| { | |
| "epoch": 2.732365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.049244560583898e-05, | |
| "loss": 0.0, | |
| "step": 1317 | |
| }, | |
| { | |
| "epoch": 2.7344398340248963, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.043451325951429e-05, | |
| "loss": 0.0, | |
| "step": 1318 | |
| }, | |
| { | |
| "epoch": 2.736514522821577, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.03765800016331e-05, | |
| "loss": 0.0, | |
| "step": 1319 | |
| }, | |
| { | |
| "epoch": 2.7385892116182573, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.031864595373239e-05, | |
| "loss": 0.0, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 2.740663900414938, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.026071123735081e-05, | |
| "loss": 0.0, | |
| "step": 1321 | |
| }, | |
| { | |
| "epoch": 2.7427385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.020277597402846e-05, | |
| "loss": 0.0, | |
| "step": 1322 | |
| }, | |
| { | |
| "epoch": 2.7448132780082988, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.0144840285306506e-05, | |
| "loss": 0.0, | |
| "step": 1323 | |
| }, | |
| { | |
| "epoch": 2.7468879668049793, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.008690429272708e-05, | |
| "loss": 0.0, | |
| "step": 1324 | |
| }, | |
| { | |
| "epoch": 2.7489626556016598, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.0028968117832906e-05, | |
| "loss": 0.0, | |
| "step": 1325 | |
| }, | |
| { | |
| "epoch": 2.7510373443983402, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.99710318821671e-05, | |
| "loss": 0.0, | |
| "step": 1326 | |
| }, | |
| { | |
| "epoch": 2.7531120331950207, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.991309570727293e-05, | |
| "loss": 0.0, | |
| "step": 1327 | |
| }, | |
| { | |
| "epoch": 2.7551867219917012, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.985515971469351e-05, | |
| "loss": 0.0, | |
| "step": 1328 | |
| }, | |
| { | |
| "epoch": 2.7572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.979722402597156e-05, | |
| "loss": 0.0, | |
| "step": 1329 | |
| }, | |
| { | |
| "epoch": 2.759336099585062, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.9739288762649195e-05, | |
| "loss": 0.0, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 2.7614107883817427, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.9681354046267626e-05, | |
| "loss": 0.0, | |
| "step": 1331 | |
| }, | |
| { | |
| "epoch": 2.763485477178423, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.962341999836691e-05, | |
| "loss": 0.0, | |
| "step": 1332 | |
| }, | |
| { | |
| "epoch": 2.7655601659751037, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.956548674048572e-05, | |
| "loss": 0.0, | |
| "step": 1333 | |
| }, | |
| { | |
| "epoch": 2.767634854771784, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.950755439416103e-05, | |
| "loss": 0.0, | |
| "step": 1334 | |
| }, | |
| { | |
| "epoch": 2.7697095435684647, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.944962308092795e-05, | |
| "loss": 0.0, | |
| "step": 1335 | |
| }, | |
| { | |
| "epoch": 2.771784232365145, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.939169292231939e-05, | |
| "loss": 0.0, | |
| "step": 1336 | |
| }, | |
| { | |
| "epoch": 2.7738589211618256, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.9333764039865864e-05, | |
| "loss": 0.0, | |
| "step": 1337 | |
| }, | |
| { | |
| "epoch": 2.775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.927583655509518e-05, | |
| "loss": 0.0, | |
| "step": 1338 | |
| }, | |
| { | |
| "epoch": 2.7780082987551866, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.9217910589532245e-05, | |
| "loss": 0.0, | |
| "step": 1339 | |
| }, | |
| { | |
| "epoch": 2.780082987551867, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.915998626469873e-05, | |
| "loss": 0.0, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 2.7821576763485476, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.9102063702112933e-05, | |
| "loss": 0.0, | |
| "step": 1341 | |
| }, | |
| { | |
| "epoch": 2.784232365145228, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.9044143023289386e-05, | |
| "loss": 0.0, | |
| "step": 1342 | |
| }, | |
| { | |
| "epoch": 2.7863070539419086, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.8986224349738734e-05, | |
| "loss": 0.0, | |
| "step": 1343 | |
| }, | |
| { | |
| "epoch": 2.788381742738589, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.892830780296736e-05, | |
| "loss": 0.0, | |
| "step": 1344 | |
| }, | |
| { | |
| "epoch": 2.7904564315352696, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.8870393504477205e-05, | |
| "loss": 0.0, | |
| "step": 1345 | |
| }, | |
| { | |
| "epoch": 2.79253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.881248157576551e-05, | |
| "loss": 0.0, | |
| "step": 1346 | |
| }, | |
| { | |
| "epoch": 2.7946058091286305, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.8754572138324526e-05, | |
| "loss": 0.0, | |
| "step": 1347 | |
| }, | |
| { | |
| "epoch": 2.796680497925311, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.869666531364127e-05, | |
| "loss": 0.0, | |
| "step": 1348 | |
| }, | |
| { | |
| "epoch": 2.7987551867219915, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.863876122319731e-05, | |
| "loss": 0.0, | |
| "step": 1349 | |
| }, | |
| { | |
| "epoch": 2.800829875518672, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.858085998846844e-05, | |
| "loss": 0.0, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.8029045643153525, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.8522961730924493e-05, | |
| "loss": 0.0, | |
| "step": 1351 | |
| }, | |
| { | |
| "epoch": 2.804979253112033, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.846506657202904e-05, | |
| "loss": 0.0, | |
| "step": 1352 | |
| }, | |
| { | |
| "epoch": 2.8070539419087135, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.840717463323915e-05, | |
| "loss": 0.0, | |
| "step": 1353 | |
| }, | |
| { | |
| "epoch": 2.809128630705394, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.834928603600515e-05, | |
| "loss": 0.0, | |
| "step": 1354 | |
| }, | |
| { | |
| "epoch": 2.8112033195020745, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.829140090177034e-05, | |
| "loss": 0.0, | |
| "step": 1355 | |
| }, | |
| { | |
| "epoch": 2.813278008298755, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.823351935197077e-05, | |
| "loss": 0.0, | |
| "step": 1356 | |
| }, | |
| { | |
| "epoch": 2.8153526970954355, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.8175641508034966e-05, | |
| "loss": 0.0, | |
| "step": 1357 | |
| }, | |
| { | |
| "epoch": 2.817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.8117767491383666e-05, | |
| "loss": 0.0, | |
| "step": 1358 | |
| }, | |
| { | |
| "epoch": 2.8195020746887964, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.805989742342961e-05, | |
| "loss": 0.0, | |
| "step": 1359 | |
| }, | |
| { | |
| "epoch": 2.821576763485477, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.80020314255772e-05, | |
| "loss": 0.0, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 2.8236514522821574, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.794416961922239e-05, | |
| "loss": 0.0, | |
| "step": 1361 | |
| }, | |
| { | |
| "epoch": 2.825726141078838, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.788631212575223e-05, | |
| "loss": 0.0, | |
| "step": 1362 | |
| }, | |
| { | |
| "epoch": 2.8278008298755184, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.782845906654481e-05, | |
| "loss": 0.0, | |
| "step": 1363 | |
| }, | |
| { | |
| "epoch": 2.8298755186721993, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7770610562968876e-05, | |
| "loss": 0.0, | |
| "step": 1364 | |
| }, | |
| { | |
| "epoch": 2.83195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.771276673638361e-05, | |
| "loss": 0.0, | |
| "step": 1365 | |
| }, | |
| { | |
| "epoch": 2.8340248962655603, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.765492770813843e-05, | |
| "loss": 0.0, | |
| "step": 1366 | |
| }, | |
| { | |
| "epoch": 2.836099585062241, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.759709359957262e-05, | |
| "loss": 0.0, | |
| "step": 1367 | |
| }, | |
| { | |
| "epoch": 2.8381742738589213, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.753926453201519e-05, | |
| "loss": 0.0, | |
| "step": 1368 | |
| }, | |
| { | |
| "epoch": 2.840248962655602, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.748144062678457e-05, | |
| "loss": 0.0, | |
| "step": 1369 | |
| }, | |
| { | |
| "epoch": 2.8423236514522823, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7423622005188353e-05, | |
| "loss": 0.0, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 2.8443983402489628, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7365808788523044e-05, | |
| "loss": 0.0, | |
| "step": 1371 | |
| }, | |
| { | |
| "epoch": 2.8464730290456433, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.730800109807381e-05, | |
| "loss": 0.0, | |
| "step": 1372 | |
| }, | |
| { | |
| "epoch": 2.8485477178423237, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.725019905511421e-05, | |
| "loss": 0.0, | |
| "step": 1373 | |
| }, | |
| { | |
| "epoch": 2.8506224066390042, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.719240278090601e-05, | |
| "loss": 0.0, | |
| "step": 1374 | |
| }, | |
| { | |
| "epoch": 2.8526970954356847, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7134612396698816e-05, | |
| "loss": 0.0, | |
| "step": 1375 | |
| }, | |
| { | |
| "epoch": 2.854771784232365, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.707682802372989e-05, | |
| "loss": 0.0, | |
| "step": 1376 | |
| }, | |
| { | |
| "epoch": 2.8568464730290457, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.701904978322391e-05, | |
| "loss": 0.0, | |
| "step": 1377 | |
| }, | |
| { | |
| "epoch": 2.858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.696127779639264e-05, | |
| "loss": 0.0, | |
| "step": 1378 | |
| }, | |
| { | |
| "epoch": 2.8609958506224067, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6903512184434786e-05, | |
| "loss": 0.0, | |
| "step": 1379 | |
| }, | |
| { | |
| "epoch": 2.863070539419087, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.684575306853564e-05, | |
| "loss": 0.0, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 2.8651452282157677, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.678800056986686e-05, | |
| "loss": 0.0, | |
| "step": 1381 | |
| }, | |
| { | |
| "epoch": 2.867219917012448, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6730254809586255e-05, | |
| "loss": 0.0, | |
| "step": 1382 | |
| }, | |
| { | |
| "epoch": 2.8692946058091287, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.667251590883747e-05, | |
| "loss": 0.0, | |
| "step": 1383 | |
| }, | |
| { | |
| "epoch": 2.871369294605809, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.661478398874976e-05, | |
| "loss": 0.0, | |
| "step": 1384 | |
| }, | |
| { | |
| "epoch": 2.8734439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.655705917043776e-05, | |
| "loss": 0.0, | |
| "step": 1385 | |
| }, | |
| { | |
| "epoch": 2.87551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6499341575001184e-05, | |
| "loss": 0.0, | |
| "step": 1386 | |
| }, | |
| { | |
| "epoch": 2.8775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6441631323524585e-05, | |
| "loss": 0.0, | |
| "step": 1387 | |
| }, | |
| { | |
| "epoch": 2.879668049792531, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.638392853707715e-05, | |
| "loss": 0.0, | |
| "step": 1388 | |
| }, | |
| { | |
| "epoch": 2.8817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.632623333671236e-05, | |
| "loss": 0.0, | |
| "step": 1389 | |
| }, | |
| { | |
| "epoch": 2.883817427385892, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6268545843467816e-05, | |
| "loss": 0.0, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 2.8858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.621086617836493e-05, | |
| "loss": 0.0, | |
| "step": 1391 | |
| }, | |
| { | |
| "epoch": 2.887966804979253, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.615319446240867e-05, | |
| "loss": 0.0, | |
| "step": 1392 | |
| }, | |
| { | |
| "epoch": 2.8900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6095530816587394e-05, | |
| "loss": 0.0, | |
| "step": 1393 | |
| }, | |
| { | |
| "epoch": 2.892116182572614, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.603787536187247e-05, | |
| "loss": 0.0, | |
| "step": 1394 | |
| }, | |
| { | |
| "epoch": 2.8941908713692945, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.59802282192181e-05, | |
| "loss": 0.0, | |
| "step": 1395 | |
| }, | |
| { | |
| "epoch": 2.896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.592258950956105e-05, | |
| "loss": 0.0, | |
| "step": 1396 | |
| }, | |
| { | |
| "epoch": 2.8983402489626555, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5864959353820405e-05, | |
| "loss": 0.0, | |
| "step": 1397 | |
| }, | |
| { | |
| "epoch": 2.900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5807337872897295e-05, | |
| "loss": 0.0, | |
| "step": 1398 | |
| }, | |
| { | |
| "epoch": 2.9024896265560165, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.574972518767463e-05, | |
| "loss": 0.0, | |
| "step": 1399 | |
| }, | |
| { | |
| "epoch": 2.904564315352697, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.569212141901691e-05, | |
| "loss": 0.0, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.9066390041493775, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.56345266877699e-05, | |
| "loss": 0.0, | |
| "step": 1401 | |
| }, | |
| { | |
| "epoch": 2.908713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5576941114760415e-05, | |
| "loss": 0.0, | |
| "step": 1402 | |
| }, | |
| { | |
| "epoch": 2.9107883817427385, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5519364820796046e-05, | |
| "loss": 0.0, | |
| "step": 1403 | |
| }, | |
| { | |
| "epoch": 2.912863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.546179792666495e-05, | |
| "loss": 0.0, | |
| "step": 1404 | |
| }, | |
| { | |
| "epoch": 2.9149377593360994, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.540424055313552e-05, | |
| "loss": 0.0, | |
| "step": 1405 | |
| }, | |
| { | |
| "epoch": 2.91701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5346692820956194e-05, | |
| "loss": 0.0, | |
| "step": 1406 | |
| }, | |
| { | |
| "epoch": 2.9190871369294604, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.528915485085521e-05, | |
| "loss": 0.0, | |
| "step": 1407 | |
| }, | |
| { | |
| "epoch": 2.921161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.523162676354028e-05, | |
| "loss": 0.0, | |
| "step": 1408 | |
| }, | |
| { | |
| "epoch": 2.923236514522822, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5174108679698425e-05, | |
| "loss": 0.0, | |
| "step": 1409 | |
| }, | |
| { | |
| "epoch": 2.9253112033195023, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.511660071999565e-05, | |
| "loss": 0.0, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 2.927385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5059103005076735e-05, | |
| "loss": 0.0, | |
| "step": 1411 | |
| }, | |
| { | |
| "epoch": 2.9294605809128633, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5001615655564966e-05, | |
| "loss": 0.0, | |
| "step": 1412 | |
| }, | |
| { | |
| "epoch": 2.931535269709544, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.494413879206188e-05, | |
| "loss": 0.0, | |
| "step": 1413 | |
| }, | |
| { | |
| "epoch": 2.9336099585062243, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.488667253514701e-05, | |
| "loss": 0.0, | |
| "step": 1414 | |
| }, | |
| { | |
| "epoch": 2.935684647302905, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4829217005377665e-05, | |
| "loss": 0.0, | |
| "step": 1415 | |
| }, | |
| { | |
| "epoch": 2.9377593360995853, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.477177232328861e-05, | |
| "loss": 0.0, | |
| "step": 1416 | |
| }, | |
| { | |
| "epoch": 2.9398340248962658, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4714338609391895e-05, | |
| "loss": 0.0, | |
| "step": 1417 | |
| }, | |
| { | |
| "epoch": 2.9419087136929463, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4656915984176524e-05, | |
| "loss": 0.0, | |
| "step": 1418 | |
| }, | |
| { | |
| "epoch": 2.9439834024896268, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.459950456810825e-05, | |
| "loss": 0.0, | |
| "step": 1419 | |
| }, | |
| { | |
| "epoch": 2.9460580912863072, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.454210448162932e-05, | |
| "loss": 0.0, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 2.9481327800829877, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4484715845158206e-05, | |
| "loss": 0.0, | |
| "step": 1421 | |
| }, | |
| { | |
| "epoch": 2.9502074688796682, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.442733877908935e-05, | |
| "loss": 0.0, | |
| "step": 1422 | |
| }, | |
| { | |
| "epoch": 2.9522821576763487, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.436997340379295e-05, | |
| "loss": 0.0, | |
| "step": 1423 | |
| }, | |
| { | |
| "epoch": 2.954356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.431261983961463e-05, | |
| "loss": 0.0, | |
| "step": 1424 | |
| }, | |
| { | |
| "epoch": 2.9564315352697097, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4255278206875285e-05, | |
| "loss": 0.0, | |
| "step": 1425 | |
| }, | |
| { | |
| "epoch": 2.95850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4197948625870766e-05, | |
| "loss": 0.0, | |
| "step": 1426 | |
| }, | |
| { | |
| "epoch": 2.9605809128630707, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.41406312168716e-05, | |
| "loss": 0.0, | |
| "step": 1427 | |
| }, | |
| { | |
| "epoch": 2.962655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.408332610012282e-05, | |
| "loss": 0.0, | |
| "step": 1428 | |
| }, | |
| { | |
| "epoch": 2.9647302904564317, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4026033395843697e-05, | |
| "loss": 0.0, | |
| "step": 1429 | |
| }, | |
| { | |
| "epoch": 2.966804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3968753224227406e-05, | |
| "loss": 0.0, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 2.9688796680497926, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3911485705440834e-05, | |
| "loss": 0.0, | |
| "step": 1431 | |
| }, | |
| { | |
| "epoch": 2.970954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3854230959624375e-05, | |
| "loss": 0.0, | |
| "step": 1432 | |
| }, | |
| { | |
| "epoch": 2.9730290456431536, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.379698910689158e-05, | |
| "loss": 0.0, | |
| "step": 1433 | |
| }, | |
| { | |
| "epoch": 2.975103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3739760267328954e-05, | |
| "loss": 0.0, | |
| "step": 1434 | |
| }, | |
| { | |
| "epoch": 2.9771784232365146, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.368254456099572e-05, | |
| "loss": 0.0, | |
| "step": 1435 | |
| }, | |
| { | |
| "epoch": 2.979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.362534210792355e-05, | |
| "loss": 0.0, | |
| "step": 1436 | |
| }, | |
| { | |
| "epoch": 2.9813278008298756, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.35681530281163e-05, | |
| "loss": 0.0, | |
| "step": 1437 | |
| }, | |
| { | |
| "epoch": 2.983402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.351097744154979e-05, | |
| "loss": 0.0, | |
| "step": 1438 | |
| }, | |
| { | |
| "epoch": 2.9854771784232366, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.345381546817148e-05, | |
| "loss": 0.0, | |
| "step": 1439 | |
| }, | |
| { | |
| "epoch": 2.987551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.339666722790035e-05, | |
| "loss": 0.0, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 2.9896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3339532840626514e-05, | |
| "loss": 0.0, | |
| "step": 1441 | |
| }, | |
| { | |
| "epoch": 2.991701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3282412426211026e-05, | |
| "loss": 0.0, | |
| "step": 1442 | |
| }, | |
| { | |
| "epoch": 2.9937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.322530610448566e-05, | |
| "loss": 0.0, | |
| "step": 1443 | |
| }, | |
| { | |
| "epoch": 2.995850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3168213995252594e-05, | |
| "loss": 0.0, | |
| "step": 1444 | |
| }, | |
| { | |
| "epoch": 2.9979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.31111362182842e-05, | |
| "loss": 0.0, | |
| "step": 1445 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.305407289332279e-05, | |
| "loss": 0.0, | |
| "step": 1446 | |
| }, | |
| { | |
| "epoch": 3.0020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2997024140080345e-05, | |
| "loss": 0.0, | |
| "step": 1447 | |
| }, | |
| { | |
| "epoch": 3.004149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2939990078238284e-05, | |
| "loss": 0.0, | |
| "step": 1448 | |
| }, | |
| { | |
| "epoch": 3.0062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.288297082744721e-05, | |
| "loss": 0.0, | |
| "step": 1449 | |
| }, | |
| { | |
| "epoch": 3.008298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.282596650732664e-05, | |
| "loss": 0.0, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 3.0103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.276897723746477e-05, | |
| "loss": 0.0, | |
| "step": 1451 | |
| }, | |
| { | |
| "epoch": 3.012448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2712003137418244e-05, | |
| "loss": 0.0, | |
| "step": 1452 | |
| }, | |
| { | |
| "epoch": 3.0145228215767634, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.265504432671185e-05, | |
| "loss": 0.0, | |
| "step": 1453 | |
| }, | |
| { | |
| "epoch": 3.016597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2598100924838326e-05, | |
| "loss": 0.0, | |
| "step": 1454 | |
| }, | |
| { | |
| "epoch": 3.0186721991701244, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.254117305125807e-05, | |
| "loss": 0.0, | |
| "step": 1455 | |
| }, | |
| { | |
| "epoch": 3.020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.248426082539892e-05, | |
| "loss": 0.0, | |
| "step": 1456 | |
| }, | |
| { | |
| "epoch": 3.0228215767634854, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.242736436665586e-05, | |
| "loss": 0.0, | |
| "step": 1457 | |
| }, | |
| { | |
| "epoch": 3.024896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2370483794390817e-05, | |
| "loss": 0.0, | |
| "step": 1458 | |
| }, | |
| { | |
| "epoch": 3.0269709543568464, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2313619227932386e-05, | |
| "loss": 0.0, | |
| "step": 1459 | |
| }, | |
| { | |
| "epoch": 3.029045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.225677078657559e-05, | |
| "loss": 0.0, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 3.0311203319502074, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2199938589581586e-05, | |
| "loss": 0.0, | |
| "step": 1461 | |
| }, | |
| { | |
| "epoch": 3.033195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2143122756177524e-05, | |
| "loss": 0.0, | |
| "step": 1462 | |
| }, | |
| { | |
| "epoch": 3.0352697095435683, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.208632340555615e-05, | |
| "loss": 0.0, | |
| "step": 1463 | |
| }, | |
| { | |
| "epoch": 3.037344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.202954065687569e-05, | |
| "loss": 0.0, | |
| "step": 1464 | |
| }, | |
| { | |
| "epoch": 3.0394190871369293, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.19727746292595e-05, | |
| "loss": 0.0, | |
| "step": 1465 | |
| }, | |
| { | |
| "epoch": 3.04149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.191602544179587e-05, | |
| "loss": 0.0, | |
| "step": 1466 | |
| }, | |
| { | |
| "epoch": 3.0435684647302903, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.185929321353778e-05, | |
| "loss": 0.0, | |
| "step": 1467 | |
| }, | |
| { | |
| "epoch": 3.045643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.18025780635026e-05, | |
| "loss": 0.0, | |
| "step": 1468 | |
| }, | |
| { | |
| "epoch": 3.0477178423236513, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.174588011067189e-05, | |
| "loss": 0.0, | |
| "step": 1469 | |
| }, | |
| { | |
| "epoch": 3.0497925311203318, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.1689199473991145e-05, | |
| "loss": 0.0, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 3.0518672199170123, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.16325362723695e-05, | |
| "loss": 0.0, | |
| "step": 1471 | |
| }, | |
| { | |
| "epoch": 3.0539419087136928, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.157589062467954e-05, | |
| "loss": 0.0, | |
| "step": 1472 | |
| }, | |
| { | |
| "epoch": 3.0560165975103732, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.151926264975701e-05, | |
| "loss": 0.0, | |
| "step": 1473 | |
| }, | |
| { | |
| "epoch": 3.0580912863070537, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.1462652466400595e-05, | |
| "loss": 0.0, | |
| "step": 1474 | |
| }, | |
| { | |
| "epoch": 3.0601659751037342, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.140606019337164e-05, | |
| "loss": 0.0, | |
| "step": 1475 | |
| }, | |
| { | |
| "epoch": 3.0622406639004147, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.134948594939392e-05, | |
| "loss": 0.0, | |
| "step": 1476 | |
| }, | |
| { | |
| "epoch": 3.064315352697095, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.1292929853153376e-05, | |
| "loss": 0.0, | |
| "step": 1477 | |
| }, | |
| { | |
| "epoch": 3.066390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.1236392023297915e-05, | |
| "loss": 0.0, | |
| "step": 1478 | |
| }, | |
| { | |
| "epoch": 3.0684647302904566, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.117987257843709e-05, | |
| "loss": 0.0, | |
| "step": 1479 | |
| }, | |
| { | |
| "epoch": 3.070539419087137, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.1123371637141884e-05, | |
| "loss": 0.0, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 3.0726141078838176, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.106688931794448e-05, | |
| "loss": 0.0, | |
| "step": 1481 | |
| }, | |
| { | |
| "epoch": 3.074688796680498, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.101042573933797e-05, | |
| "loss": 0.0, | |
| "step": 1482 | |
| }, | |
| { | |
| "epoch": 3.0767634854771786, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.095398101977617e-05, | |
| "loss": 0.0, | |
| "step": 1483 | |
| }, | |
| { | |
| "epoch": 3.078838174273859, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0897555277673295e-05, | |
| "loss": 0.0, | |
| "step": 1484 | |
| }, | |
| { | |
| "epoch": 3.0809128630705396, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0841148631403746e-05, | |
| "loss": 0.0, | |
| "step": 1485 | |
| }, | |
| { | |
| "epoch": 3.08298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0784761199301896e-05, | |
| "loss": 0.0, | |
| "step": 1486 | |
| }, | |
| { | |
| "epoch": 3.0850622406639006, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.072839309966179e-05, | |
| "loss": 0.0, | |
| "step": 1487 | |
| }, | |
| { | |
| "epoch": 3.087136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0672044450736895e-05, | |
| "loss": 0.0, | |
| "step": 1488 | |
| }, | |
| { | |
| "epoch": 3.0892116182572615, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.061571537073992e-05, | |
| "loss": 0.0, | |
| "step": 1489 | |
| }, | |
| { | |
| "epoch": 3.091286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.055940597784247e-05, | |
| "loss": 0.0, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 3.0933609958506225, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0503116390174902e-05, | |
| "loss": 0.0, | |
| "step": 1491 | |
| }, | |
| { | |
| "epoch": 3.095435684647303, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0446846725825972e-05, | |
| "loss": 0.0, | |
| "step": 1492 | |
| }, | |
| { | |
| "epoch": 3.0975103734439835, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.039059710284266e-05, | |
| "loss": 0.0, | |
| "step": 1493 | |
| }, | |
| { | |
| "epoch": 3.099585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0334367639229923e-05, | |
| "loss": 0.0, | |
| "step": 1494 | |
| }, | |
| { | |
| "epoch": 3.1016597510373445, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.027815845295038e-05, | |
| "loss": 0.0, | |
| "step": 1495 | |
| }, | |
| { | |
| "epoch": 3.103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.022196966192417e-05, | |
| "loss": 0.0, | |
| "step": 1496 | |
| }, | |
| { | |
| "epoch": 3.1058091286307055, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0165801384028595e-05, | |
| "loss": 0.0, | |
| "step": 1497 | |
| }, | |
| { | |
| "epoch": 3.107883817427386, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.010965373709795e-05, | |
| "loss": 0.0, | |
| "step": 1498 | |
| }, | |
| { | |
| "epoch": 3.1099585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0053526838923234e-05, | |
| "loss": 0.0, | |
| "step": 1499 | |
| }, | |
| { | |
| "epoch": 3.112033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9997420807251913e-05, | |
| "loss": 0.0, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 3.1141078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9941335759787703e-05, | |
| "loss": 0.0, | |
| "step": 1501 | |
| }, | |
| { | |
| "epoch": 3.116182572614108, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9885271814190272e-05, | |
| "loss": 0.0, | |
| "step": 1502 | |
| }, | |
| { | |
| "epoch": 3.1182572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9829229088075043e-05, | |
| "loss": 0.0, | |
| "step": 1503 | |
| }, | |
| { | |
| "epoch": 3.120331950207469, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9773207699012897e-05, | |
| "loss": 0.0, | |
| "step": 1504 | |
| }, | |
| { | |
| "epoch": 3.1224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9717207764529963e-05, | |
| "loss": 0.0, | |
| "step": 1505 | |
| }, | |
| { | |
| "epoch": 3.12448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9661229402107373e-05, | |
| "loss": 0.0, | |
| "step": 1506 | |
| }, | |
| { | |
| "epoch": 3.1265560165975104, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9605272729180994e-05, | |
| "loss": 0.0, | |
| "step": 1507 | |
| }, | |
| { | |
| "epoch": 3.128630705394191, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9549337863141172e-05, | |
| "loss": 0.0, | |
| "step": 1508 | |
| }, | |
| { | |
| "epoch": 3.1307053941908713, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9493424921332546e-05, | |
| "loss": 0.0, | |
| "step": 1509 | |
| }, | |
| { | |
| "epoch": 3.132780082987552, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9437534021053722e-05, | |
| "loss": 0.0, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 3.1348547717842323, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9381665279557083e-05, | |
| "loss": 0.0, | |
| "step": 1511 | |
| }, | |
| { | |
| "epoch": 3.136929460580913, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.932581881404853e-05, | |
| "loss": 0.0, | |
| "step": 1512 | |
| }, | |
| { | |
| "epoch": 3.1390041493775933, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9269994741687212e-05, | |
| "loss": 0.0, | |
| "step": 1513 | |
| }, | |
| { | |
| "epoch": 3.141078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9214193179585337e-05, | |
| "loss": 0.0, | |
| "step": 1514 | |
| }, | |
| { | |
| "epoch": 3.1431535269709543, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9158414244807843e-05, | |
| "loss": 0.0, | |
| "step": 1515 | |
| }, | |
| { | |
| "epoch": 3.145228215767635, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9102658054372228e-05, | |
| "loss": 0.0, | |
| "step": 1516 | |
| }, | |
| { | |
| "epoch": 3.1473029045643153, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9046924725248275e-05, | |
| "loss": 0.0, | |
| "step": 1517 | |
| }, | |
| { | |
| "epoch": 3.1493775933609958, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8991214374357784e-05, | |
| "loss": 0.0, | |
| "step": 1518 | |
| }, | |
| { | |
| "epoch": 3.1514522821576763, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.893552711857438e-05, | |
| "loss": 0.0, | |
| "step": 1519 | |
| }, | |
| { | |
| "epoch": 3.1535269709543567, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8879863074723217e-05, | |
| "loss": 0.0, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 3.1556016597510372, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8824222359580754e-05, | |
| "loss": 0.0, | |
| "step": 1521 | |
| }, | |
| { | |
| "epoch": 3.1576763485477177, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.876860508987452e-05, | |
| "loss": 0.0, | |
| "step": 1522 | |
| }, | |
| { | |
| "epoch": 3.159751037344398, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8713011382282857e-05, | |
| "loss": 0.0, | |
| "step": 1523 | |
| }, | |
| { | |
| "epoch": 3.1618257261410787, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8657441353434658e-05, | |
| "loss": 0.0, | |
| "step": 1524 | |
| }, | |
| { | |
| "epoch": 3.163900414937759, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8601895119909167e-05, | |
| "loss": 0.0, | |
| "step": 1525 | |
| }, | |
| { | |
| "epoch": 3.1659751037344397, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8546372798235686e-05, | |
| "loss": 0.0, | |
| "step": 1526 | |
| }, | |
| { | |
| "epoch": 3.16804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.849087450489336e-05, | |
| "loss": 0.0, | |
| "step": 1527 | |
| }, | |
| { | |
| "epoch": 3.1701244813278007, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8435400356310944e-05, | |
| "loss": 0.0, | |
| "step": 1528 | |
| }, | |
| { | |
| "epoch": 3.172199170124481, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8379950468866505e-05, | |
| "loss": 0.0, | |
| "step": 1529 | |
| }, | |
| { | |
| "epoch": 3.1742738589211617, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8324524958887248e-05, | |
| "loss": 0.0, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 3.176348547717842, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8269123942649214e-05, | |
| "loss": 0.0, | |
| "step": 1531 | |
| }, | |
| { | |
| "epoch": 3.1784232365145226, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8213747536377067e-05, | |
| "loss": 0.0, | |
| "step": 1532 | |
| }, | |
| { | |
| "epoch": 3.180497925311203, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8158395856243842e-05, | |
| "loss": 0.0, | |
| "step": 1533 | |
| }, | |
| { | |
| "epoch": 3.1825726141078836, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8103069018370702e-05, | |
| "loss": 0.0, | |
| "step": 1534 | |
| }, | |
| { | |
| "epoch": 3.1846473029045645, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8047767138826683e-05, | |
| "loss": 0.0, | |
| "step": 1535 | |
| }, | |
| { | |
| "epoch": 3.186721991701245, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7992490333628493e-05, | |
| "loss": 0.0, | |
| "step": 1536 | |
| }, | |
| { | |
| "epoch": 3.1887966804979255, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7937238718740197e-05, | |
| "loss": 0.0, | |
| "step": 1537 | |
| }, | |
| { | |
| "epoch": 3.190871369294606, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7882012410073032e-05, | |
| "loss": 0.0, | |
| "step": 1538 | |
| }, | |
| { | |
| "epoch": 3.1929460580912865, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.782681152348516e-05, | |
| "loss": 0.0, | |
| "step": 1539 | |
| }, | |
| { | |
| "epoch": 3.195020746887967, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.777163617478138e-05, | |
| "loss": 0.0, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 3.1970954356846475, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7716486479712945e-05, | |
| "loss": 0.0, | |
| "step": 1541 | |
| }, | |
| { | |
| "epoch": 3.199170124481328, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7661362553977276e-05, | |
| "loss": 0.0, | |
| "step": 1542 | |
| }, | |
| { | |
| "epoch": 3.2012448132780085, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7606264513217715e-05, | |
| "loss": 0.0, | |
| "step": 1543 | |
| }, | |
| { | |
| "epoch": 3.203319502074689, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.755119247302335e-05, | |
| "loss": 0.0, | |
| "step": 1544 | |
| }, | |
| { | |
| "epoch": 3.2053941908713695, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.749614654892867e-05, | |
| "loss": 0.0, | |
| "step": 1545 | |
| }, | |
| { | |
| "epoch": 3.20746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.74411268564134e-05, | |
| "loss": 0.0, | |
| "step": 1546 | |
| }, | |
| { | |
| "epoch": 3.2095435684647304, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7386133510902236e-05, | |
| "loss": 0.0, | |
| "step": 1547 | |
| }, | |
| { | |
| "epoch": 3.211618257261411, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7331166627764595e-05, | |
| "loss": 0.0, | |
| "step": 1548 | |
| }, | |
| { | |
| "epoch": 3.2136929460580914, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7276226322314386e-05, | |
| "loss": 0.0, | |
| "step": 1549 | |
| }, | |
| { | |
| "epoch": 3.215767634854772, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7221312709809757e-05, | |
| "loss": 0.0, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 3.2178423236514524, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7166425905452843e-05, | |
| "loss": 0.0, | |
| "step": 1551 | |
| }, | |
| { | |
| "epoch": 3.219917012448133, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7111566024389575e-05, | |
| "loss": 0.0, | |
| "step": 1552 | |
| }, | |
| { | |
| "epoch": 3.2219917012448134, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7056733181709362e-05, | |
| "loss": 0.0, | |
| "step": 1553 | |
| }, | |
| { | |
| "epoch": 3.224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.700192749244491e-05, | |
| "loss": 0.0, | |
| "step": 1554 | |
| }, | |
| { | |
| "epoch": 3.2261410788381744, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6947149071571965e-05, | |
| "loss": 0.0, | |
| "step": 1555 | |
| }, | |
| { | |
| "epoch": 3.228215767634855, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6892398034009052e-05, | |
| "loss": 0.0, | |
| "step": 1556 | |
| }, | |
| { | |
| "epoch": 3.2302904564315353, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6837674494617267e-05, | |
| "loss": 0.0, | |
| "step": 1557 | |
| }, | |
| { | |
| "epoch": 3.232365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6782978568200005e-05, | |
| "loss": 0.0, | |
| "step": 1558 | |
| }, | |
| { | |
| "epoch": 3.2344398340248963, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6728310369502728e-05, | |
| "loss": 0.0, | |
| "step": 1559 | |
| }, | |
| { | |
| "epoch": 3.236514522821577, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6673670013212745e-05, | |
| "loss": 0.0, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 3.2385892116182573, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6619057613958946e-05, | |
| "loss": 0.0, | |
| "step": 1561 | |
| }, | |
| { | |
| "epoch": 3.240663900414938, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.656447328631156e-05, | |
| "loss": 0.0, | |
| "step": 1562 | |
| }, | |
| { | |
| "epoch": 3.2427385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6509917144781937e-05, | |
| "loss": 0.0, | |
| "step": 1563 | |
| }, | |
| { | |
| "epoch": 3.2448132780082988, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6455389303822313e-05, | |
| "loss": 0.0, | |
| "step": 1564 | |
| }, | |
| { | |
| "epoch": 3.2468879668049793, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.640088987782553e-05, | |
| "loss": 0.0, | |
| "step": 1565 | |
| }, | |
| { | |
| "epoch": 3.2489626556016598, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6346418981124804e-05, | |
| "loss": 0.0, | |
| "step": 1566 | |
| }, | |
| { | |
| "epoch": 3.2510373443983402, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.629197672799354e-05, | |
| "loss": 0.0, | |
| "step": 1567 | |
| }, | |
| { | |
| "epoch": 3.2531120331950207, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.623756323264502e-05, | |
| "loss": 0.0, | |
| "step": 1568 | |
| }, | |
| { | |
| "epoch": 3.2551867219917012, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6183178609232222e-05, | |
| "loss": 0.0, | |
| "step": 1569 | |
| }, | |
| { | |
| "epoch": 3.2572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6128822971847524e-05, | |
| "loss": 0.0, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 3.259336099585062, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6074496434522506e-05, | |
| "loss": 0.0, | |
| "step": 1571 | |
| }, | |
| { | |
| "epoch": 3.2614107883817427, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6020199111227724e-05, | |
| "loss": 0.0, | |
| "step": 1572 | |
| }, | |
| { | |
| "epoch": 3.263485477178423, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5965931115872413e-05, | |
| "loss": 0.0, | |
| "step": 1573 | |
| }, | |
| { | |
| "epoch": 3.2655601659751037, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.591169256230429e-05, | |
| "loss": 0.0, | |
| "step": 1574 | |
| }, | |
| { | |
| "epoch": 3.267634854771784, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5857483564309326e-05, | |
| "loss": 0.0, | |
| "step": 1575 | |
| }, | |
| { | |
| "epoch": 3.2697095435684647, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5803304235611472e-05, | |
| "loss": 0.0, | |
| "step": 1576 | |
| }, | |
| { | |
| "epoch": 3.271784232365145, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.574915468987242e-05, | |
| "loss": 0.0, | |
| "step": 1577 | |
| }, | |
| { | |
| "epoch": 3.2738589211618256, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5695035040691418e-05, | |
| "loss": 0.0, | |
| "step": 1578 | |
| }, | |
| { | |
| "epoch": 3.275933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.564094540160496e-05, | |
| "loss": 0.0, | |
| "step": 1579 | |
| }, | |
| { | |
| "epoch": 3.2780082987551866, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5586885886086617e-05, | |
| "loss": 0.0, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 3.280082987551867, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.553285660754673e-05, | |
| "loss": 0.0, | |
| "step": 1581 | |
| }, | |
| { | |
| "epoch": 3.2821576763485476, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5478857679332225e-05, | |
| "loss": 0.0, | |
| "step": 1582 | |
| }, | |
| { | |
| "epoch": 3.284232365145228, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5424889214726357e-05, | |
| "loss": 0.0, | |
| "step": 1583 | |
| }, | |
| { | |
| "epoch": 3.2863070539419086, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.537095132694847e-05, | |
| "loss": 0.0, | |
| "step": 1584 | |
| }, | |
| { | |
| "epoch": 3.288381742738589, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5317044129153746e-05, | |
| "loss": 0.0, | |
| "step": 1585 | |
| }, | |
| { | |
| "epoch": 3.2904564315352696, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5263167734433017e-05, | |
| "loss": 0.0, | |
| "step": 1586 | |
| }, | |
| { | |
| "epoch": 3.29253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.520932225581245e-05, | |
| "loss": 0.0, | |
| "step": 1587 | |
| }, | |
| { | |
| "epoch": 3.2946058091286305, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.515550780625341e-05, | |
| "loss": 0.0, | |
| "step": 1588 | |
| }, | |
| { | |
| "epoch": 3.296680497925311, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.51017244986521e-05, | |
| "loss": 0.0, | |
| "step": 1589 | |
| }, | |
| { | |
| "epoch": 3.2987551867219915, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5047972445839442e-05, | |
| "loss": 0.0, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 3.300829875518672, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4994251760580782e-05, | |
| "loss": 0.0, | |
| "step": 1591 | |
| }, | |
| { | |
| "epoch": 3.3029045643153525, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4940562555575638e-05, | |
| "loss": 0.0, | |
| "step": 1592 | |
| }, | |
| { | |
| "epoch": 3.304979253112033, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4886904943457496e-05, | |
| "loss": 0.0, | |
| "step": 1593 | |
| }, | |
| { | |
| "epoch": 3.3070539419087135, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4833279036793574e-05, | |
| "loss": 0.0, | |
| "step": 1594 | |
| }, | |
| { | |
| "epoch": 3.309128630705394, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.477968494808457e-05, | |
| "loss": 0.0, | |
| "step": 1595 | |
| }, | |
| { | |
| "epoch": 3.3112033195020745, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.472612278976443e-05, | |
| "loss": 0.0, | |
| "step": 1596 | |
| }, | |
| { | |
| "epoch": 3.313278008298755, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4672592674200116e-05, | |
| "loss": 0.0, | |
| "step": 1597 | |
| }, | |
| { | |
| "epoch": 3.3153526970954355, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4619094713691357e-05, | |
| "loss": 0.0, | |
| "step": 1598 | |
| }, | |
| { | |
| "epoch": 3.317427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4565629020470452e-05, | |
| "loss": 0.0, | |
| "step": 1599 | |
| }, | |
| { | |
| "epoch": 3.3195020746887964, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4512195706701975e-05, | |
| "loss": 0.0, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 3.3215767634854774, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4458794884482585e-05, | |
| "loss": 0.0, | |
| "step": 1601 | |
| }, | |
| { | |
| "epoch": 3.323651452282158, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.44054266658408e-05, | |
| "loss": 0.0, | |
| "step": 1602 | |
| }, | |
| { | |
| "epoch": 3.3257261410788383, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4352091162736695e-05, | |
| "loss": 0.0, | |
| "step": 1603 | |
| }, | |
| { | |
| "epoch": 3.327800829875519, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4298788487061745e-05, | |
| "loss": 0.0, | |
| "step": 1604 | |
| }, | |
| { | |
| "epoch": 3.3298755186721993, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4245518750638547e-05, | |
| "loss": 0.0, | |
| "step": 1605 | |
| }, | |
| { | |
| "epoch": 3.33195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4192282065220588e-05, | |
| "loss": 0.0, | |
| "step": 1606 | |
| }, | |
| { | |
| "epoch": 3.3340248962655603, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.413907854249204e-05, | |
| "loss": 0.0, | |
| "step": 1607 | |
| }, | |
| { | |
| "epoch": 3.336099585062241, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4085908294067486e-05, | |
| "loss": 0.0, | |
| "step": 1608 | |
| }, | |
| { | |
| "epoch": 3.3381742738589213, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4032771431491694e-05, | |
| "loss": 0.0, | |
| "step": 1609 | |
| }, | |
| { | |
| "epoch": 3.340248962655602, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3979668066239422e-05, | |
| "loss": 0.0, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 3.3423236514522823, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.392659830971513e-05, | |
| "loss": 0.0, | |
| "step": 1611 | |
| }, | |
| { | |
| "epoch": 3.3443983402489628, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3873562273252775e-05, | |
| "loss": 0.0, | |
| "step": 1612 | |
| }, | |
| { | |
| "epoch": 3.3464730290456433, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3820560068115592e-05, | |
| "loss": 0.0, | |
| "step": 1613 | |
| }, | |
| { | |
| "epoch": 3.3485477178423237, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.37675918054958e-05, | |
| "loss": 0.0, | |
| "step": 1614 | |
| }, | |
| { | |
| "epoch": 3.3506224066390042, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3714657596514473e-05, | |
| "loss": 0.0, | |
| "step": 1615 | |
| }, | |
| { | |
| "epoch": 3.3526970954356847, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.366175755222119e-05, | |
| "loss": 0.0, | |
| "step": 1616 | |
| }, | |
| { | |
| "epoch": 3.354771784232365, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3608891783593865e-05, | |
| "loss": 0.0, | |
| "step": 1617 | |
| }, | |
| { | |
| "epoch": 3.3568464730290457, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3556060401538544e-05, | |
| "loss": 0.0, | |
| "step": 1618 | |
| }, | |
| { | |
| "epoch": 3.358921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.350326351688909e-05, | |
| "loss": 0.0, | |
| "step": 1619 | |
| }, | |
| { | |
| "epoch": 3.3609958506224067, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.345050124040701e-05, | |
| "loss": 0.0, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 3.363070539419087, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3397773682781222e-05, | |
| "loss": 0.0, | |
| "step": 1621 | |
| }, | |
| { | |
| "epoch": 3.3651452282157677, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3345080954627778e-05, | |
| "loss": 0.0, | |
| "step": 1622 | |
| }, | |
| { | |
| "epoch": 3.367219917012448, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3292423166489703e-05, | |
| "loss": 0.0, | |
| "step": 1623 | |
| }, | |
| { | |
| "epoch": 3.3692946058091287, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.323980042883667e-05, | |
| "loss": 0.0, | |
| "step": 1624 | |
| }, | |
| { | |
| "epoch": 3.371369294605809, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3187212852064858e-05, | |
| "loss": 0.0, | |
| "step": 1625 | |
| }, | |
| { | |
| "epoch": 3.3734439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.313466054649669e-05, | |
| "loss": 0.0, | |
| "step": 1626 | |
| }, | |
| { | |
| "epoch": 3.37551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3082143622380553e-05, | |
| "loss": 0.0, | |
| "step": 1627 | |
| }, | |
| { | |
| "epoch": 3.3775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.302966218989064e-05, | |
| "loss": 0.0, | |
| "step": 1628 | |
| }, | |
| { | |
| "epoch": 3.379668049792531, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2977216359126677e-05, | |
| "loss": 0.0, | |
| "step": 1629 | |
| }, | |
| { | |
| "epoch": 3.3817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.292480624011373e-05, | |
| "loss": 0.0, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 3.383817427385892, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.287243194280186e-05, | |
| "loss": 0.0, | |
| "step": 1631 | |
| }, | |
| { | |
| "epoch": 3.3858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2820093577066114e-05, | |
| "loss": 0.0, | |
| "step": 1632 | |
| }, | |
| { | |
| "epoch": 3.387966804979253, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.276779125270603e-05, | |
| "loss": 0.0, | |
| "step": 1633 | |
| }, | |
| { | |
| "epoch": 3.3900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2715525079445603e-05, | |
| "loss": 0.0, | |
| "step": 1634 | |
| }, | |
| { | |
| "epoch": 3.392116182572614, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.266329516693299e-05, | |
| "loss": 0.0, | |
| "step": 1635 | |
| }, | |
| { | |
| "epoch": 3.3941908713692945, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2611101624740223e-05, | |
| "loss": 0.0, | |
| "step": 1636 | |
| }, | |
| { | |
| "epoch": 3.396265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.25589445623631e-05, | |
| "loss": 0.0, | |
| "step": 1637 | |
| }, | |
| { | |
| "epoch": 3.3983402489626555, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2506824089220852e-05, | |
| "loss": 0.0, | |
| "step": 1638 | |
| }, | |
| { | |
| "epoch": 3.400414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2454740314655972e-05, | |
| "loss": 0.0, | |
| "step": 1639 | |
| }, | |
| { | |
| "epoch": 3.4024896265560165, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.240269334793392e-05, | |
| "loss": 0.0, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 3.404564315352697, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.235068329824298e-05, | |
| "loss": 0.0, | |
| "step": 1641 | |
| }, | |
| { | |
| "epoch": 3.4066390041493775, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2298710274693996e-05, | |
| "loss": 0.0, | |
| "step": 1642 | |
| }, | |
| { | |
| "epoch": 3.408713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.224677438632008e-05, | |
| "loss": 0.0, | |
| "step": 1643 | |
| }, | |
| { | |
| "epoch": 3.4107883817427385, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2194875742076497e-05, | |
| "loss": 0.0, | |
| "step": 1644 | |
| }, | |
| { | |
| "epoch": 3.412863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2143014450840364e-05, | |
| "loss": 0.0, | |
| "step": 1645 | |
| }, | |
| { | |
| "epoch": 3.4149377593360994, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2091190621410396e-05, | |
| "loss": 0.0, | |
| "step": 1646 | |
| }, | |
| { | |
| "epoch": 3.41701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.203940436250676e-05, | |
| "loss": 0.0, | |
| "step": 1647 | |
| }, | |
| { | |
| "epoch": 3.4190871369294604, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1987655782770824e-05, | |
| "loss": 0.0, | |
| "step": 1648 | |
| }, | |
| { | |
| "epoch": 3.421161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1935944990764835e-05, | |
| "loss": 0.0, | |
| "step": 1649 | |
| }, | |
| { | |
| "epoch": 3.4232365145228214, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.188427209497183e-05, | |
| "loss": 0.0, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 3.425311203319502, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1832637203795347e-05, | |
| "loss": 0.0, | |
| "step": 1651 | |
| }, | |
| { | |
| "epoch": 3.4273858921161824, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.178104042555913e-05, | |
| "loss": 0.0, | |
| "step": 1652 | |
| }, | |
| { | |
| "epoch": 3.429460580912863, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1729481868507025e-05, | |
| "loss": 0.0, | |
| "step": 1653 | |
| }, | |
| { | |
| "epoch": 3.431535269709544, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.16779616408027e-05, | |
| "loss": 0.0, | |
| "step": 1654 | |
| }, | |
| { | |
| "epoch": 3.4336099585062243, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1626479850529357e-05, | |
| "loss": 0.0, | |
| "step": 1655 | |
| }, | |
| { | |
| "epoch": 3.435684647302905, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.157503660568961e-05, | |
| "loss": 0.0, | |
| "step": 1656 | |
| }, | |
| { | |
| "epoch": 3.4377593360995853, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.15236320142052e-05, | |
| "loss": 0.0, | |
| "step": 1657 | |
| }, | |
| { | |
| "epoch": 3.4398340248962658, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1472266183916773e-05, | |
| "loss": 0.0, | |
| "step": 1658 | |
| }, | |
| { | |
| "epoch": 3.4419087136929463, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.142093922258365e-05, | |
| "loss": 0.0, | |
| "step": 1659 | |
| }, | |
| { | |
| "epoch": 3.4439834024896268, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1369651237883612e-05, | |
| "loss": 0.0, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 3.4460580912863072, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.13184023374127e-05, | |
| "loss": 0.0, | |
| "step": 1661 | |
| }, | |
| { | |
| "epoch": 3.4481327800829877, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1267192628684913e-05, | |
| "loss": 0.0, | |
| "step": 1662 | |
| }, | |
| { | |
| "epoch": 3.4502074688796682, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.121602221913206e-05, | |
| "loss": 0.0, | |
| "step": 1663 | |
| }, | |
| { | |
| "epoch": 3.4522821576763487, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1164891216103526e-05, | |
| "loss": 0.0, | |
| "step": 1664 | |
| }, | |
| { | |
| "epoch": 3.454356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1113799726865964e-05, | |
| "loss": 0.0, | |
| "step": 1665 | |
| }, | |
| { | |
| "epoch": 3.4564315352697097, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1062747858603183e-05, | |
| "loss": 0.0, | |
| "step": 1666 | |
| }, | |
| { | |
| "epoch": 3.45850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1011735718415876e-05, | |
| "loss": 0.0, | |
| "step": 1667 | |
| }, | |
| { | |
| "epoch": 3.4605809128630707, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0960763413321344e-05, | |
| "loss": 0.0, | |
| "step": 1668 | |
| }, | |
| { | |
| "epoch": 3.462655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0909831050253356e-05, | |
| "loss": 0.0, | |
| "step": 1669 | |
| }, | |
| { | |
| "epoch": 3.4647302904564317, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0858938736061906e-05, | |
| "loss": 0.0, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 3.466804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.08080865775129e-05, | |
| "loss": 0.0, | |
| "step": 1671 | |
| }, | |
| { | |
| "epoch": 3.4688796680497926, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0757274681288065e-05, | |
| "loss": 0.0, | |
| "step": 1672 | |
| }, | |
| { | |
| "epoch": 3.470954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0706503153984658e-05, | |
| "loss": 0.0, | |
| "step": 1673 | |
| }, | |
| { | |
| "epoch": 3.4730290456431536, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0655772102115202e-05, | |
| "loss": 0.0, | |
| "step": 1674 | |
| }, | |
| { | |
| "epoch": 3.475103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0605081632107354e-05, | |
| "loss": 0.0, | |
| "step": 1675 | |
| }, | |
| { | |
| "epoch": 3.4771784232365146, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0554431850303615e-05, | |
| "loss": 0.0, | |
| "step": 1676 | |
| }, | |
| { | |
| "epoch": 3.479253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0503822862961146e-05, | |
| "loss": 0.0, | |
| "step": 1677 | |
| }, | |
| { | |
| "epoch": 3.4813278008298756, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0453254776251476e-05, | |
| "loss": 0.0, | |
| "step": 1678 | |
| }, | |
| { | |
| "epoch": 3.483402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0402727696260392e-05, | |
| "loss": 0.0, | |
| "step": 1679 | |
| }, | |
| { | |
| "epoch": 3.4854771784232366, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0352241728987634e-05, | |
| "loss": 0.0, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 3.487551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0301796980346645e-05, | |
| "loss": 0.0, | |
| "step": 1681 | |
| }, | |
| { | |
| "epoch": 3.4896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0251393556164464e-05, | |
| "loss": 0.0, | |
| "step": 1682 | |
| }, | |
| { | |
| "epoch": 3.491701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0201031562181415e-05, | |
| "loss": 0.0, | |
| "step": 1683 | |
| }, | |
| { | |
| "epoch": 3.4937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0150711104050876e-05, | |
| "loss": 0.0, | |
| "step": 1684 | |
| }, | |
| { | |
| "epoch": 3.495850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0100432287339114e-05, | |
| "loss": 0.0, | |
| "step": 1685 | |
| }, | |
| { | |
| "epoch": 3.4979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.005019521752506e-05, | |
| "loss": 0.0, | |
| "step": 1686 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0000000000000012e-05, | |
| "loss": 0.0, | |
| "step": 1687 | |
| }, | |
| { | |
| "epoch": 3.5020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9949846740067495e-05, | |
| "loss": 0.0, | |
| "step": 1688 | |
| }, | |
| { | |
| "epoch": 3.504149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9899735542943035e-05, | |
| "loss": 0.0, | |
| "step": 1689 | |
| }, | |
| { | |
| "epoch": 3.5062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9849666513753863e-05, | |
| "loss": 0.0, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 3.508298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.979963975753879e-05, | |
| "loss": 0.0, | |
| "step": 1691 | |
| }, | |
| { | |
| "epoch": 3.5103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.974965537924794e-05, | |
| "loss": 0.0, | |
| "step": 1692 | |
| }, | |
| { | |
| "epoch": 3.512448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9699713483742487e-05, | |
| "loss": 0.0, | |
| "step": 1693 | |
| }, | |
| { | |
| "epoch": 3.5145228215767634, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9649814175794547e-05, | |
| "loss": 0.0, | |
| "step": 1694 | |
| }, | |
| { | |
| "epoch": 3.516597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.959995756008685e-05, | |
| "loss": 0.0, | |
| "step": 1695 | |
| }, | |
| { | |
| "epoch": 3.5186721991701244, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9550143741212588e-05, | |
| "loss": 0.0, | |
| "step": 1696 | |
| }, | |
| { | |
| "epoch": 3.520746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9500372823675137e-05, | |
| "loss": 0.0, | |
| "step": 1697 | |
| }, | |
| { | |
| "epoch": 3.5228215767634854, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9450644911887892e-05, | |
| "loss": 0.0, | |
| "step": 1698 | |
| }, | |
| { | |
| "epoch": 3.524896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9400960110174036e-05, | |
| "loss": 0.0, | |
| "step": 1699 | |
| }, | |
| { | |
| "epoch": 3.5269709543568464, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.93513185227663e-05, | |
| "loss": 0.0, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 3.529045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9301720253806775e-05, | |
| "loss": 0.0, | |
| "step": 1701 | |
| }, | |
| { | |
| "epoch": 3.5311203319502074, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9252165407346628e-05, | |
| "loss": 0.0, | |
| "step": 1702 | |
| }, | |
| { | |
| "epoch": 3.533195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9202654087345972e-05, | |
| "loss": 0.0, | |
| "step": 1703 | |
| }, | |
| { | |
| "epoch": 3.5352697095435683, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9153186397673603e-05, | |
| "loss": 0.0, | |
| "step": 1704 | |
| }, | |
| { | |
| "epoch": 3.537344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.910376244210679e-05, | |
| "loss": 0.0, | |
| "step": 1705 | |
| }, | |
| { | |
| "epoch": 3.5394190871369293, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.905438232433102e-05, | |
| "loss": 0.0, | |
| "step": 1706 | |
| }, | |
| { | |
| "epoch": 3.54149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9005046147939845e-05, | |
| "loss": 0.0, | |
| "step": 1707 | |
| }, | |
| { | |
| "epoch": 3.5435684647302903, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8955754016434635e-05, | |
| "loss": 0.0, | |
| "step": 1708 | |
| }, | |
| { | |
| "epoch": 3.545643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.890650603322433e-05, | |
| "loss": 0.0, | |
| "step": 1709 | |
| }, | |
| { | |
| "epoch": 3.5477178423236513, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8857302301625277e-05, | |
| "loss": 0.0, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 3.5497925311203318, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8808142924860996e-05, | |
| "loss": 0.0, | |
| "step": 1711 | |
| }, | |
| { | |
| "epoch": 3.5518672199170123, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8759028006061918e-05, | |
| "loss": 0.0, | |
| "step": 1712 | |
| }, | |
| { | |
| "epoch": 3.5539419087136928, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.870995764826524e-05, | |
| "loss": 0.0, | |
| "step": 1713 | |
| }, | |
| { | |
| "epoch": 3.5560165975103732, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8660931954414683e-05, | |
| "loss": 0.0, | |
| "step": 1714 | |
| }, | |
| { | |
| "epoch": 3.5580912863070537, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8611951027360225e-05, | |
| "loss": 0.0, | |
| "step": 1715 | |
| }, | |
| { | |
| "epoch": 3.5601659751037342, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.856301496985797e-05, | |
| "loss": 0.0, | |
| "step": 1716 | |
| }, | |
| { | |
| "epoch": 3.5622406639004147, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8514123884569882e-05, | |
| "loss": 0.0, | |
| "step": 1717 | |
| }, | |
| { | |
| "epoch": 3.564315352697095, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.846527787406355e-05, | |
| "loss": 0.0, | |
| "step": 1718 | |
| }, | |
| { | |
| "epoch": 3.5663900414937757, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.841647704081204e-05, | |
| "loss": 0.0, | |
| "step": 1719 | |
| }, | |
| { | |
| "epoch": 3.568464730290456, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.836772148719364e-05, | |
| "loss": 0.0, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 3.5705394190871367, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.83190113154916e-05, | |
| "loss": 0.0, | |
| "step": 1721 | |
| }, | |
| { | |
| "epoch": 3.572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.827034662789401e-05, | |
| "loss": 0.0, | |
| "step": 1722 | |
| }, | |
| { | |
| "epoch": 3.5746887966804977, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8221727526493534e-05, | |
| "loss": 0.0, | |
| "step": 1723 | |
| }, | |
| { | |
| "epoch": 3.576763485477178, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8173154113287202e-05, | |
| "loss": 0.0, | |
| "step": 1724 | |
| }, | |
| { | |
| "epoch": 3.578838174273859, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8124626490176168e-05, | |
| "loss": 0.0, | |
| "step": 1725 | |
| }, | |
| { | |
| "epoch": 3.5809128630705396, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8076144758965544e-05, | |
| "loss": 0.0, | |
| "step": 1726 | |
| }, | |
| { | |
| "epoch": 3.58298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.80277090213642e-05, | |
| "loss": 0.0, | |
| "step": 1727 | |
| }, | |
| { | |
| "epoch": 3.5850622406639006, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7979319378984432e-05, | |
| "loss": 0.0, | |
| "step": 1728 | |
| }, | |
| { | |
| "epoch": 3.587136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.793097593334191e-05, | |
| "loss": 0.0, | |
| "step": 1729 | |
| }, | |
| { | |
| "epoch": 3.5892116182572615, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7882678785855387e-05, | |
| "loss": 0.0, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 3.591286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7834428037846425e-05, | |
| "loss": 0.0, | |
| "step": 1731 | |
| }, | |
| { | |
| "epoch": 3.5933609958506225, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.77862237905393e-05, | |
| "loss": 0.0, | |
| "step": 1732 | |
| }, | |
| { | |
| "epoch": 3.595435684647303, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7738066145060737e-05, | |
| "loss": 0.0, | |
| "step": 1733 | |
| }, | |
| { | |
| "epoch": 3.5975103734439835, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7689955202439653e-05, | |
| "loss": 0.0, | |
| "step": 1734 | |
| }, | |
| { | |
| "epoch": 3.599585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.764189106360702e-05, | |
| "loss": 0.0, | |
| "step": 1735 | |
| }, | |
| { | |
| "epoch": 3.6016597510373445, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7593873829395645e-05, | |
| "loss": 0.0, | |
| "step": 1736 | |
| }, | |
| { | |
| "epoch": 3.603734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7545903600539864e-05, | |
| "loss": 0.0, | |
| "step": 1737 | |
| }, | |
| { | |
| "epoch": 3.6058091286307055, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7497980477675458e-05, | |
| "loss": 0.0, | |
| "step": 1738 | |
| }, | |
| { | |
| "epoch": 3.607883817427386, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7450104561339388e-05, | |
| "loss": 0.0, | |
| "step": 1739 | |
| }, | |
| { | |
| "epoch": 3.6099585062240664, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.740227595196954e-05, | |
| "loss": 0.0, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 3.612033195020747, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7354494749904582e-05, | |
| "loss": 0.0, | |
| "step": 1741 | |
| }, | |
| { | |
| "epoch": 3.6141078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7306761055383732e-05, | |
| "loss": 0.0, | |
| "step": 1742 | |
| }, | |
| { | |
| "epoch": 3.616182572614108, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7259074968546547e-05, | |
| "loss": 0.0, | |
| "step": 1743 | |
| }, | |
| { | |
| "epoch": 3.6182572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7211436589432672e-05, | |
| "loss": 0.0, | |
| "step": 1744 | |
| }, | |
| { | |
| "epoch": 3.620331950207469, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7163846017981702e-05, | |
| "loss": 0.0, | |
| "step": 1745 | |
| }, | |
| { | |
| "epoch": 3.6224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.711630335403294e-05, | |
| "loss": 0.0, | |
| "step": 1746 | |
| }, | |
| { | |
| "epoch": 3.62448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7068808697325153e-05, | |
| "loss": 0.0, | |
| "step": 1747 | |
| }, | |
| { | |
| "epoch": 3.6265560165975104, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7021362147496417e-05, | |
| "loss": 0.0, | |
| "step": 1748 | |
| }, | |
| { | |
| "epoch": 3.628630705394191, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.69739638040839e-05, | |
| "loss": 0.0, | |
| "step": 1749 | |
| }, | |
| { | |
| "epoch": 3.6307053941908713, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6926613766523598e-05, | |
| "loss": 0.0, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 3.632780082987552, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6879312134150197e-05, | |
| "loss": 0.0, | |
| "step": 1751 | |
| }, | |
| { | |
| "epoch": 3.6348547717842323, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6832059006196848e-05, | |
| "loss": 0.0, | |
| "step": 1752 | |
| }, | |
| { | |
| "epoch": 3.636929460580913, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6784854481794895e-05, | |
| "loss": 0.0, | |
| "step": 1753 | |
| }, | |
| { | |
| "epoch": 3.6390041493775933, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.673769865997376e-05, | |
| "loss": 0.0, | |
| "step": 1754 | |
| }, | |
| { | |
| "epoch": 3.641078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6690591639660692e-05, | |
| "loss": 0.0, | |
| "step": 1755 | |
| }, | |
| { | |
| "epoch": 3.6431535269709543, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.664353351968053e-05, | |
| "loss": 0.0, | |
| "step": 1756 | |
| }, | |
| { | |
| "epoch": 3.645228215767635, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.659652439875555e-05, | |
| "loss": 0.0, | |
| "step": 1757 | |
| }, | |
| { | |
| "epoch": 3.6473029045643153, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.654956437550525e-05, | |
| "loss": 0.0, | |
| "step": 1758 | |
| }, | |
| { | |
| "epoch": 3.6493775933609958, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6502653548446076e-05, | |
| "loss": 0.0, | |
| "step": 1759 | |
| }, | |
| { | |
| "epoch": 3.6514522821576763, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.645579201599131e-05, | |
| "loss": 0.0, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 3.6535269709543567, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.640897987645081e-05, | |
| "loss": 0.0, | |
| "step": 1761 | |
| }, | |
| { | |
| "epoch": 3.6556016597510372, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6362217228030814e-05, | |
| "loss": 0.0, | |
| "step": 1762 | |
| }, | |
| { | |
| "epoch": 3.6576763485477177, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6315504168833715e-05, | |
| "loss": 0.0, | |
| "step": 1763 | |
| }, | |
| { | |
| "epoch": 3.659751037344398, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.626884079685789e-05, | |
| "loss": 0.0, | |
| "step": 1764 | |
| }, | |
| { | |
| "epoch": 3.6618257261410787, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.622222720999749e-05, | |
| "loss": 0.0, | |
| "step": 1765 | |
| }, | |
| { | |
| "epoch": 3.663900414937759, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6175663506042196e-05, | |
| "loss": 0.0, | |
| "step": 1766 | |
| }, | |
| { | |
| "epoch": 3.6659751037344397, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6129149782677056e-05, | |
| "loss": 0.0, | |
| "step": 1767 | |
| }, | |
| { | |
| "epoch": 3.66804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6082686137482263e-05, | |
| "loss": 0.0, | |
| "step": 1768 | |
| }, | |
| { | |
| "epoch": 3.6701244813278007, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6036272667932954e-05, | |
| "loss": 0.0, | |
| "step": 1769 | |
| }, | |
| { | |
| "epoch": 3.6721991701244816, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5989909471399e-05, | |
| "loss": 0.0, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 3.674273858921162, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.594359664514482e-05, | |
| "loss": 0.0, | |
| "step": 1771 | |
| }, | |
| { | |
| "epoch": 3.6763485477178426, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5897334286329128e-05, | |
| "loss": 0.0, | |
| "step": 1772 | |
| }, | |
| { | |
| "epoch": 3.678423236514523, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5851122492004788e-05, | |
| "loss": 0.0, | |
| "step": 1773 | |
| }, | |
| { | |
| "epoch": 3.6804979253112036, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5804961359118597e-05, | |
| "loss": 0.0, | |
| "step": 1774 | |
| }, | |
| { | |
| "epoch": 3.682572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5758850984511035e-05, | |
| "loss": 0.0, | |
| "step": 1775 | |
| }, | |
| { | |
| "epoch": 3.6846473029045645, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5712791464916126e-05, | |
| "loss": 0.0, | |
| "step": 1776 | |
| }, | |
| { | |
| "epoch": 3.686721991701245, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5666782896961207e-05, | |
| "loss": 0.0, | |
| "step": 1777 | |
| }, | |
| { | |
| "epoch": 3.6887966804979255, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5620825377166698e-05, | |
| "loss": 0.0, | |
| "step": 1778 | |
| }, | |
| { | |
| "epoch": 3.690871369294606, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5574919001945947e-05, | |
| "loss": 0.0, | |
| "step": 1779 | |
| }, | |
| { | |
| "epoch": 3.6929460580912865, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.552906386760503e-05, | |
| "loss": 0.0, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 3.695020746887967, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5483260070342456e-05, | |
| "loss": 0.0, | |
| "step": 1781 | |
| }, | |
| { | |
| "epoch": 3.6970954356846475, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5437507706249104e-05, | |
| "loss": 0.0, | |
| "step": 1782 | |
| }, | |
| { | |
| "epoch": 3.699170124481328, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5391806871307936e-05, | |
| "loss": 0.0, | |
| "step": 1783 | |
| }, | |
| { | |
| "epoch": 3.7012448132780085, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5346157661393776e-05, | |
| "loss": 0.0, | |
| "step": 1784 | |
| }, | |
| { | |
| "epoch": 3.703319502074689, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.530056017227318e-05, | |
| "loss": 0.0, | |
| "step": 1785 | |
| }, | |
| { | |
| "epoch": 3.7053941908713695, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5255014499604212e-05, | |
| "loss": 0.0, | |
| "step": 1786 | |
| }, | |
| { | |
| "epoch": 3.70746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5209520738936183e-05, | |
| "loss": 0.0, | |
| "step": 1787 | |
| }, | |
| { | |
| "epoch": 3.7095435684647304, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5164078985709535e-05, | |
| "loss": 0.0, | |
| "step": 1788 | |
| }, | |
| { | |
| "epoch": 3.711618257261411, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5118689335255598e-05, | |
| "loss": 0.0, | |
| "step": 1789 | |
| }, | |
| { | |
| "epoch": 3.7136929460580914, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5073351882796413e-05, | |
| "loss": 0.0, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 3.715767634854772, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.502806672344447e-05, | |
| "loss": 0.0, | |
| "step": 1791 | |
| }, | |
| { | |
| "epoch": 3.7178423236514524, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4982833952202586e-05, | |
| "loss": 0.0, | |
| "step": 1792 | |
| }, | |
| { | |
| "epoch": 3.719917012448133, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.49376536639637e-05, | |
| "loss": 0.0, | |
| "step": 1793 | |
| }, | |
| { | |
| "epoch": 3.7219917012448134, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4892525953510574e-05, | |
| "loss": 0.0, | |
| "step": 1794 | |
| }, | |
| { | |
| "epoch": 3.724066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4847450915515742e-05, | |
| "loss": 0.0, | |
| "step": 1795 | |
| }, | |
| { | |
| "epoch": 3.7261410788381744, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.480242864454121e-05, | |
| "loss": 0.0, | |
| "step": 1796 | |
| }, | |
| { | |
| "epoch": 3.728215767634855, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4757459235038258e-05, | |
| "loss": 0.0, | |
| "step": 1797 | |
| }, | |
| { | |
| "epoch": 3.7302904564315353, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4712542781347314e-05, | |
| "loss": 0.0, | |
| "step": 1798 | |
| }, | |
| { | |
| "epoch": 3.732365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4667679377697694e-05, | |
| "loss": 0.0, | |
| "step": 1799 | |
| }, | |
| { | |
| "epoch": 3.7344398340248963, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.46228691182074e-05, | |
| "loss": 0.0, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 3.736514522821577, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.457811209688298e-05, | |
| "loss": 0.0, | |
| "step": 1801 | |
| }, | |
| { | |
| "epoch": 3.7385892116182573, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4533408407619275e-05, | |
| "loss": 0.0, | |
| "step": 1802 | |
| }, | |
| { | |
| "epoch": 3.740663900414938, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4488758144199229e-05, | |
| "loss": 0.0, | |
| "step": 1803 | |
| }, | |
| { | |
| "epoch": 3.7427385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.444416140029373e-05, | |
| "loss": 0.0, | |
| "step": 1804 | |
| }, | |
| { | |
| "epoch": 3.7448132780082988, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4399618269461395e-05, | |
| "loss": 0.0, | |
| "step": 1805 | |
| }, | |
| { | |
| "epoch": 3.7468879668049793, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4355128845148318e-05, | |
| "loss": 0.0, | |
| "step": 1806 | |
| }, | |
| { | |
| "epoch": 3.7489626556016598, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4310693220687974e-05, | |
| "loss": 0.0, | |
| "step": 1807 | |
| }, | |
| { | |
| "epoch": 3.7510373443983402, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4266311489300955e-05, | |
| "loss": 0.0, | |
| "step": 1808 | |
| }, | |
| { | |
| "epoch": 3.7531120331950207, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4221983744094802e-05, | |
| "loss": 0.0, | |
| "step": 1809 | |
| }, | |
| { | |
| "epoch": 3.7551867219917012, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4177710078063758e-05, | |
| "loss": 0.0, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 3.7572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4133490584088666e-05, | |
| "loss": 0.0, | |
| "step": 1811 | |
| }, | |
| { | |
| "epoch": 3.759336099585062, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.408932535493671e-05, | |
| "loss": 0.0, | |
| "step": 1812 | |
| }, | |
| { | |
| "epoch": 3.7614107883817427, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4045214483261198e-05, | |
| "loss": 0.0, | |
| "step": 1813 | |
| }, | |
| { | |
| "epoch": 3.763485477178423, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4001158061601446e-05, | |
| "loss": 0.0, | |
| "step": 1814 | |
| }, | |
| { | |
| "epoch": 3.7655601659751037, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3957156182382537e-05, | |
| "loss": 0.0, | |
| "step": 1815 | |
| }, | |
| { | |
| "epoch": 3.767634854771784, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3913208937915079e-05, | |
| "loss": 0.0, | |
| "step": 1816 | |
| }, | |
| { | |
| "epoch": 3.7697095435684647, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3869316420395128e-05, | |
| "loss": 0.0, | |
| "step": 1817 | |
| }, | |
| { | |
| "epoch": 3.771784232365145, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3825478721903909e-05, | |
| "loss": 0.0, | |
| "step": 1818 | |
| }, | |
| { | |
| "epoch": 3.7738589211618256, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3781695934407613e-05, | |
| "loss": 0.0, | |
| "step": 1819 | |
| }, | |
| { | |
| "epoch": 3.775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.373796814975727e-05, | |
| "loss": 0.0, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 3.7780082987551866, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3694295459688518e-05, | |
| "loss": 0.0, | |
| "step": 1821 | |
| }, | |
| { | |
| "epoch": 3.780082987551867, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3650677955821383e-05, | |
| "loss": 0.0, | |
| "step": 1822 | |
| }, | |
| { | |
| "epoch": 3.7821576763485476, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3607115729660146e-05, | |
| "loss": 0.0, | |
| "step": 1823 | |
| }, | |
| { | |
| "epoch": 3.784232365145228, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3563608872593132e-05, | |
| "loss": 0.0, | |
| "step": 1824 | |
| }, | |
| { | |
| "epoch": 3.7863070539419086, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.352015747589246e-05, | |
| "loss": 0.0, | |
| "step": 1825 | |
| }, | |
| { | |
| "epoch": 3.788381742738589, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3476761630713946e-05, | |
| "loss": 0.0, | |
| "step": 1826 | |
| }, | |
| { | |
| "epoch": 3.7904564315352696, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3433421428096845e-05, | |
| "loss": 0.0, | |
| "step": 1827 | |
| }, | |
| { | |
| "epoch": 3.79253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.33901369589637e-05, | |
| "loss": 0.0, | |
| "step": 1828 | |
| }, | |
| { | |
| "epoch": 3.7946058091286305, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3346908314120098e-05, | |
| "loss": 0.0, | |
| "step": 1829 | |
| }, | |
| { | |
| "epoch": 3.796680497925311, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3303735584254533e-05, | |
| "loss": 0.0, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 3.7987551867219915, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3260618859938217e-05, | |
| "loss": 0.0, | |
| "step": 1831 | |
| }, | |
| { | |
| "epoch": 3.800829875518672, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3217558231624819e-05, | |
| "loss": 0.0, | |
| "step": 1832 | |
| }, | |
| { | |
| "epoch": 3.8029045643153525, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3174553789650365e-05, | |
| "loss": 0.0, | |
| "step": 1833 | |
| }, | |
| { | |
| "epoch": 3.804979253112033, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3131605624232999e-05, | |
| "loss": 0.0, | |
| "step": 1834 | |
| }, | |
| { | |
| "epoch": 3.8070539419087135, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3088713825472806e-05, | |
| "loss": 0.0, | |
| "step": 1835 | |
| }, | |
| { | |
| "epoch": 3.809128630705394, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3045878483351624e-05, | |
| "loss": 0.0, | |
| "step": 1836 | |
| }, | |
| { | |
| "epoch": 3.8112033195020745, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3003099687732826e-05, | |
| "loss": 0.0, | |
| "step": 1837 | |
| }, | |
| { | |
| "epoch": 3.813278008298755, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2960377528361176e-05, | |
| "loss": 0.0, | |
| "step": 1838 | |
| }, | |
| { | |
| "epoch": 3.8153526970954355, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2917712094862638e-05, | |
| "loss": 0.0, | |
| "step": 1839 | |
| }, | |
| { | |
| "epoch": 3.817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2875103476744152e-05, | |
| "loss": 0.0, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 3.8195020746887964, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2832551763393459e-05, | |
| "loss": 0.0, | |
| "step": 1841 | |
| }, | |
| { | |
| "epoch": 3.821576763485477, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2790057044078936e-05, | |
| "loss": 0.0, | |
| "step": 1842 | |
| }, | |
| { | |
| "epoch": 3.8236514522821574, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2747619407949406e-05, | |
| "loss": 0.0, | |
| "step": 1843 | |
| }, | |
| { | |
| "epoch": 3.825726141078838, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2705238944033887e-05, | |
| "loss": 0.0, | |
| "step": 1844 | |
| }, | |
| { | |
| "epoch": 3.8278008298755184, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2662915741241513e-05, | |
| "loss": 0.0, | |
| "step": 1845 | |
| }, | |
| { | |
| "epoch": 3.8298755186721993, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2620649888361279e-05, | |
| "loss": 0.0, | |
| "step": 1846 | |
| }, | |
| { | |
| "epoch": 3.83195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2578441474061833e-05, | |
| "loss": 0.0, | |
| "step": 1847 | |
| }, | |
| { | |
| "epoch": 3.8340248962655603, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2536290586891355e-05, | |
| "loss": 0.0, | |
| "step": 1848 | |
| }, | |
| { | |
| "epoch": 3.836099585062241, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2494197315277345e-05, | |
| "loss": 0.0, | |
| "step": 1849 | |
| }, | |
| { | |
| "epoch": 3.8381742738589213, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.24521617475264e-05, | |
| "loss": 0.0, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 3.840248962655602, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2410183971824098e-05, | |
| "loss": 0.0, | |
| "step": 1851 | |
| }, | |
| { | |
| "epoch": 3.8423236514522823, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2368264076234757e-05, | |
| "loss": 0.0, | |
| "step": 1852 | |
| }, | |
| { | |
| "epoch": 3.8443983402489628, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2326402148701257e-05, | |
| "loss": 0.0, | |
| "step": 1853 | |
| }, | |
| { | |
| "epoch": 3.8464730290456433, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2284598277044894e-05, | |
| "loss": 0.0, | |
| "step": 1854 | |
| }, | |
| { | |
| "epoch": 3.8485477178423237, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2242852548965161e-05, | |
| "loss": 0.0, | |
| "step": 1855 | |
| }, | |
| { | |
| "epoch": 3.8506224066390042, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2201165052039578e-05, | |
| "loss": 0.0, | |
| "step": 1856 | |
| }, | |
| { | |
| "epoch": 3.8526970954356847, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2159535873723472e-05, | |
| "loss": 0.0, | |
| "step": 1857 | |
| }, | |
| { | |
| "epoch": 3.854771784232365, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2117965101349865e-05, | |
| "loss": 0.0, | |
| "step": 1858 | |
| }, | |
| { | |
| "epoch": 3.8568464730290457, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2076452822129236e-05, | |
| "loss": 0.0, | |
| "step": 1859 | |
| }, | |
| { | |
| "epoch": 3.858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2034999123149337e-05, | |
| "loss": 0.0, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 3.8609958506224067, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1993604091375044e-05, | |
| "loss": 0.0, | |
| "step": 1861 | |
| }, | |
| { | |
| "epoch": 3.863070539419087, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1952267813648164e-05, | |
| "loss": 0.0, | |
| "step": 1862 | |
| }, | |
| { | |
| "epoch": 3.8651452282157677, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1910990376687206e-05, | |
| "loss": 0.0, | |
| "step": 1863 | |
| }, | |
| { | |
| "epoch": 3.867219917012448, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1869771867087279e-05, | |
| "loss": 0.0, | |
| "step": 1864 | |
| }, | |
| { | |
| "epoch": 3.8692946058091287, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1828612371319869e-05, | |
| "loss": 0.0, | |
| "step": 1865 | |
| }, | |
| { | |
| "epoch": 3.871369294605809, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.178751197573261e-05, | |
| "loss": 0.0, | |
| "step": 1866 | |
| }, | |
| { | |
| "epoch": 3.8734439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.174647076654921e-05, | |
| "loss": 0.0, | |
| "step": 1867 | |
| }, | |
| { | |
| "epoch": 3.87551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1705488829869185e-05, | |
| "loss": 0.0, | |
| "step": 1868 | |
| }, | |
| { | |
| "epoch": 3.8775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1664566251667689e-05, | |
| "loss": 0.0, | |
| "step": 1869 | |
| }, | |
| { | |
| "epoch": 3.879668049792531, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1623703117795371e-05, | |
| "loss": 0.0, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 3.8817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1582899513978182e-05, | |
| "loss": 0.0, | |
| "step": 1871 | |
| }, | |
| { | |
| "epoch": 3.883817427385892, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1542155525817154e-05, | |
| "loss": 0.0, | |
| "step": 1872 | |
| }, | |
| { | |
| "epoch": 3.8858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1501471238788268e-05, | |
| "loss": 0.0, | |
| "step": 1873 | |
| }, | |
| { | |
| "epoch": 3.887966804979253, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1460846738242273e-05, | |
| "loss": 0.0, | |
| "step": 1874 | |
| }, | |
| { | |
| "epoch": 3.8900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.142028210940449e-05, | |
| "loss": 0.0, | |
| "step": 1875 | |
| }, | |
| { | |
| "epoch": 3.892116182572614, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.13797774373746e-05, | |
| "loss": 0.0, | |
| "step": 1876 | |
| }, | |
| { | |
| "epoch": 3.8941908713692945, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.133933280712654e-05, | |
| "loss": 0.0, | |
| "step": 1877 | |
| }, | |
| { | |
| "epoch": 3.896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1298948303508293e-05, | |
| "loss": 0.0, | |
| "step": 1878 | |
| }, | |
| { | |
| "epoch": 3.8983402489626555, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1258624011241662e-05, | |
| "loss": 0.0, | |
| "step": 1879 | |
| }, | |
| { | |
| "epoch": 3.900414937759336, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1218360014922162e-05, | |
| "loss": 0.0, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 3.9024896265560165, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1178156399018833e-05, | |
| "loss": 0.0, | |
| "step": 1881 | |
| }, | |
| { | |
| "epoch": 3.904564315352697, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1138013247873998e-05, | |
| "loss": 0.0, | |
| "step": 1882 | |
| }, | |
| { | |
| "epoch": 3.9066390041493775, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1097930645703156e-05, | |
| "loss": 0.0, | |
| "step": 1883 | |
| }, | |
| { | |
| "epoch": 3.908713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1057908676594807e-05, | |
| "loss": 0.0, | |
| "step": 1884 | |
| }, | |
| { | |
| "epoch": 3.9107883817427385, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.101794742451019e-05, | |
| "loss": 0.0, | |
| "step": 1885 | |
| }, | |
| { | |
| "epoch": 3.912863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0978046973283223e-05, | |
| "loss": 0.0, | |
| "step": 1886 | |
| }, | |
| { | |
| "epoch": 3.9149377593360994, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0938207406620256e-05, | |
| "loss": 0.0, | |
| "step": 1887 | |
| }, | |
| { | |
| "epoch": 3.91701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0898428808099876e-05, | |
| "loss": 0.0, | |
| "step": 1888 | |
| }, | |
| { | |
| "epoch": 3.9190871369294604, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0858711261172816e-05, | |
| "loss": 0.0, | |
| "step": 1889 | |
| }, | |
| { | |
| "epoch": 3.921161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0819054849161716e-05, | |
| "loss": 0.0, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 3.923236514522822, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0779459655260936e-05, | |
| "loss": 0.0, | |
| "step": 1891 | |
| }, | |
| { | |
| "epoch": 3.9253112033195023, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0739925762536427e-05, | |
| "loss": 0.0, | |
| "step": 1892 | |
| }, | |
| { | |
| "epoch": 3.927385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0700453253925546e-05, | |
| "loss": 0.0, | |
| "step": 1893 | |
| }, | |
| { | |
| "epoch": 3.9294605809128633, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0661042212236876e-05, | |
| "loss": 0.0, | |
| "step": 1894 | |
| }, | |
| { | |
| "epoch": 3.931535269709544, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0621692720150012e-05, | |
| "loss": 0.0, | |
| "step": 1895 | |
| }, | |
| { | |
| "epoch": 3.9336099585062243, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0582404860215463e-05, | |
| "loss": 0.0, | |
| "step": 1896 | |
| }, | |
| { | |
| "epoch": 3.935684647302905, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0543178714854444e-05, | |
| "loss": 0.0, | |
| "step": 1897 | |
| }, | |
| { | |
| "epoch": 3.9377593360995853, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0504014366358661e-05, | |
| "loss": 0.0, | |
| "step": 1898 | |
| }, | |
| { | |
| "epoch": 3.9398340248962658, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.046491189689022e-05, | |
| "loss": 0.0, | |
| "step": 1899 | |
| }, | |
| { | |
| "epoch": 3.9419087136929463, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0425871388481399e-05, | |
| "loss": 0.0, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 3.9439834024896268, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0386892923034466e-05, | |
| "loss": 0.0, | |
| "step": 1901 | |
| }, | |
| { | |
| "epoch": 3.9460580912863072, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0347976582321562e-05, | |
| "loss": 0.0, | |
| "step": 1902 | |
| }, | |
| { | |
| "epoch": 3.9481327800829877, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0309122447984482e-05, | |
| "loss": 0.0, | |
| "step": 1903 | |
| }, | |
| { | |
| "epoch": 3.9502074688796682, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0270330601534524e-05, | |
| "loss": 0.0, | |
| "step": 1904 | |
| }, | |
| { | |
| "epoch": 3.9522821576763487, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0231601124352317e-05, | |
| "loss": 0.0, | |
| "step": 1905 | |
| }, | |
| { | |
| "epoch": 3.954356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0192934097687646e-05, | |
| "loss": 0.0, | |
| "step": 1906 | |
| }, | |
| { | |
| "epoch": 3.9564315352697097, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0154329602659261e-05, | |
| "loss": 0.0, | |
| "step": 1907 | |
| }, | |
| { | |
| "epoch": 3.95850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0115787720254753e-05, | |
| "loss": 0.0, | |
| "step": 1908 | |
| }, | |
| { | |
| "epoch": 3.9605809128630707, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0077308531330367e-05, | |
| "loss": 0.0, | |
| "step": 1909 | |
| }, | |
| { | |
| "epoch": 3.962655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0038892116610785e-05, | |
| "loss": 0.0, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 3.9647302904564317, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0000538556689037e-05, | |
| "loss": 0.0, | |
| "step": 1911 | |
| }, | |
| { | |
| "epoch": 3.966804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.962247932026274e-06, | |
| "loss": 0.0, | |
| "step": 1912 | |
| }, | |
| { | |
| "epoch": 3.9688796680497926, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.924020322951611e-06, | |
| "loss": 0.0, | |
| "step": 1913 | |
| }, | |
| { | |
| "epoch": 3.970954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.885855809661965e-06, | |
| "loss": 0.0, | |
| "step": 1914 | |
| }, | |
| { | |
| "epoch": 3.9730290456431536, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.847754472221913e-06, | |
| "loss": 0.0, | |
| "step": 1915 | |
| }, | |
| { | |
| "epoch": 3.975103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.80971639056345e-06, | |
| "loss": 0.0, | |
| "step": 1916 | |
| }, | |
| { | |
| "epoch": 3.9771784232365146, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.771741644485906e-06, | |
| "loss": 0.0, | |
| "step": 1917 | |
| }, | |
| { | |
| "epoch": 3.979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.73383031365573e-06, | |
| "loss": 0.0, | |
| "step": 1918 | |
| }, | |
| { | |
| "epoch": 3.9813278008298756, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.695982477606316e-06, | |
| "loss": 0.0, | |
| "step": 1919 | |
| }, | |
| { | |
| "epoch": 3.983402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.65819821573788e-06, | |
| "loss": 0.0, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 3.9854771784232366, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.620477607317249e-06, | |
| "loss": 0.0, | |
| "step": 1921 | |
| }, | |
| { | |
| "epoch": 3.987551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.582820731477733e-06, | |
| "loss": 0.0, | |
| "step": 1922 | |
| }, | |
| { | |
| "epoch": 3.9896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.545227667218903e-06, | |
| "loss": 0.0, | |
| "step": 1923 | |
| }, | |
| { | |
| "epoch": 3.991701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.507698493406497e-06, | |
| "loss": 0.0, | |
| "step": 1924 | |
| }, | |
| { | |
| "epoch": 3.9937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.470233288772209e-06, | |
| "loss": 0.0, | |
| "step": 1925 | |
| }, | |
| { | |
| "epoch": 3.995850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.432832131913506e-06, | |
| "loss": 0.0, | |
| "step": 1926 | |
| }, | |
| { | |
| "epoch": 3.9979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.39549510129353e-06, | |
| "loss": 0.0, | |
| "step": 1927 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.358222275240884e-06, | |
| "loss": 0.0, | |
| "step": 1928 | |
| }, | |
| { | |
| "epoch": 4.0020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.32101373194945e-06, | |
| "loss": 0.0, | |
| "step": 1929 | |
| }, | |
| { | |
| "epoch": 4.004149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.283869549478282e-06, | |
| "loss": 0.0, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 4.0062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.246789805751417e-06, | |
| "loss": 0.0, | |
| "step": 1931 | |
| }, | |
| { | |
| "epoch": 4.008298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.209774578557672e-06, | |
| "loss": 0.0, | |
| "step": 1932 | |
| }, | |
| { | |
| "epoch": 4.0103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.172823945550546e-06, | |
| "loss": 0.0, | |
| "step": 1933 | |
| }, | |
| { | |
| "epoch": 4.012448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.135937984248034e-06, | |
| "loss": 0.0, | |
| "step": 1934 | |
| }, | |
| { | |
| "epoch": 4.014522821576763, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.099116772032418e-06, | |
| "loss": 0.0, | |
| "step": 1935 | |
| }, | |
| { | |
| "epoch": 4.016597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.062360386150181e-06, | |
| "loss": 0.0, | |
| "step": 1936 | |
| }, | |
| { | |
| "epoch": 4.018672199170124, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.025668903711806e-06, | |
| "loss": 0.0, | |
| "step": 1937 | |
| }, | |
| { | |
| "epoch": 4.020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.989042401691589e-06, | |
| "loss": 0.0, | |
| "step": 1938 | |
| }, | |
| { | |
| "epoch": 4.022821576763485, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.952480956927525e-06, | |
| "loss": 0.0, | |
| "step": 1939 | |
| }, | |
| { | |
| "epoch": 4.024896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.91598464612113e-06, | |
| "loss": 0.0, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 4.026970954356846, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.879553545837281e-06, | |
| "loss": 0.0, | |
| "step": 1941 | |
| }, | |
| { | |
| "epoch": 4.029045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.84318773250402e-06, | |
| "loss": 0.0, | |
| "step": 1942 | |
| }, | |
| { | |
| "epoch": 4.031120331950207, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.806887282412453e-06, | |
| "loss": 0.0, | |
| "step": 1943 | |
| }, | |
| { | |
| "epoch": 4.033195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.770652271716571e-06, | |
| "loss": 0.0, | |
| "step": 1944 | |
| }, | |
| { | |
| "epoch": 4.035269709543568, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.734482776433037e-06, | |
| "loss": 0.0, | |
| "step": 1945 | |
| }, | |
| { | |
| "epoch": 4.037344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.698378872441116e-06, | |
| "loss": 0.0, | |
| "step": 1946 | |
| }, | |
| { | |
| "epoch": 4.039419087136929, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.662340635482458e-06, | |
| "loss": 0.0, | |
| "step": 1947 | |
| }, | |
| { | |
| "epoch": 4.04149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.62636814116093e-06, | |
| "loss": 0.0, | |
| "step": 1948 | |
| }, | |
| { | |
| "epoch": 4.04356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.590461464942499e-06, | |
| "loss": 0.0, | |
| "step": 1949 | |
| }, | |
| { | |
| "epoch": 4.045643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.554620682155064e-06, | |
| "loss": 0.0, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 4.047717842323651, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.518845867988248e-06, | |
| "loss": 0.0, | |
| "step": 1951 | |
| }, | |
| { | |
| "epoch": 4.049792531120332, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.483137097493324e-06, | |
| "loss": 0.0, | |
| "step": 1952 | |
| }, | |
| { | |
| "epoch": 4.051867219917012, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.447494445582993e-06, | |
| "loss": 0.0, | |
| "step": 1953 | |
| }, | |
| { | |
| "epoch": 4.053941908713693, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.41191798703124e-06, | |
| "loss": 0.0, | |
| "step": 1954 | |
| }, | |
| { | |
| "epoch": 4.056016597510373, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.376407796473192e-06, | |
| "loss": 0.0, | |
| "step": 1955 | |
| }, | |
| { | |
| "epoch": 4.058091286307054, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.34096394840497e-06, | |
| "loss": 0.0, | |
| "step": 1956 | |
| }, | |
| { | |
| "epoch": 4.060165975103734, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.305586517183482e-06, | |
| "loss": 0.0, | |
| "step": 1957 | |
| }, | |
| { | |
| "epoch": 4.062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.270275577026322e-06, | |
| "loss": 0.0, | |
| "step": 1958 | |
| }, | |
| { | |
| "epoch": 4.064315352697095, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.235031202011603e-06, | |
| "loss": 0.0, | |
| "step": 1959 | |
| }, | |
| { | |
| "epoch": 4.066390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.199853466077784e-06, | |
| "loss": 0.0, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 4.068464730290456, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.164742443023503e-06, | |
| "loss": 0.0, | |
| "step": 1961 | |
| }, | |
| { | |
| "epoch": 4.070539419087137, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.129698206507468e-06, | |
| "loss": 0.0, | |
| "step": 1962 | |
| }, | |
| { | |
| "epoch": 4.072614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.094720830048284e-06, | |
| "loss": 0.0, | |
| "step": 1963 | |
| }, | |
| { | |
| "epoch": 4.074688796680498, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.059810387024249e-06, | |
| "loss": 0.0, | |
| "step": 1964 | |
| }, | |
| { | |
| "epoch": 4.076763485477178, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.024966950673287e-06, | |
| "loss": 0.0, | |
| "step": 1965 | |
| }, | |
| { | |
| "epoch": 4.078838174273859, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.990190594092739e-06, | |
| "loss": 0.0, | |
| "step": 1966 | |
| }, | |
| { | |
| "epoch": 4.080912863070539, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.955481390239197e-06, | |
| "loss": 0.0, | |
| "step": 1967 | |
| }, | |
| { | |
| "epoch": 4.08298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.920839411928401e-06, | |
| "loss": 0.0, | |
| "step": 1968 | |
| }, | |
| { | |
| "epoch": 4.0850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.886264731835061e-06, | |
| "loss": 0.0, | |
| "step": 1969 | |
| }, | |
| { | |
| "epoch": 4.087136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.851757422492667e-06, | |
| "loss": 0.0, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 4.089211618257261, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817317556293429e-06, | |
| "loss": 0.0, | |
| "step": 1971 | |
| }, | |
| { | |
| "epoch": 4.091286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.78294520548804e-06, | |
| "loss": 0.0, | |
| "step": 1972 | |
| }, | |
| { | |
| "epoch": 4.093360995850622, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.748640442185537e-06, | |
| "loss": 0.0, | |
| "step": 1973 | |
| }, | |
| { | |
| "epoch": 4.095435684647303, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.714403338353188e-06, | |
| "loss": 0.0, | |
| "step": 1974 | |
| }, | |
| { | |
| "epoch": 4.097510373443983, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.680233965816333e-06, | |
| "loss": 0.0, | |
| "step": 1975 | |
| }, | |
| { | |
| "epoch": 4.0995850622406635, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.646132396258176e-06, | |
| "loss": 0.0, | |
| "step": 1976 | |
| }, | |
| { | |
| "epoch": 4.101659751037344, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.612098701219719e-06, | |
| "loss": 0.0, | |
| "step": 1977 | |
| }, | |
| { | |
| "epoch": 4.1037344398340245, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.578132952099566e-06, | |
| "loss": 0.0, | |
| "step": 1978 | |
| }, | |
| { | |
| "epoch": 4.105809128630705, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.544235220153751e-06, | |
| "loss": 0.0, | |
| "step": 1979 | |
| }, | |
| { | |
| "epoch": 4.1078838174273855, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.510405576495646e-06, | |
| "loss": 0.0, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 4.109958506224066, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.476644092095777e-06, | |
| "loss": 0.0, | |
| "step": 1981 | |
| }, | |
| { | |
| "epoch": 4.1120331950207465, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.44295083778166e-06, | |
| "loss": 0.0, | |
| "step": 1982 | |
| }, | |
| { | |
| "epoch": 4.114107883817427, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.4093258842376925e-06, | |
| "loss": 0.0, | |
| "step": 1983 | |
| }, | |
| { | |
| "epoch": 4.1161825726141075, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.375769302004991e-06, | |
| "loss": 0.0, | |
| "step": 1984 | |
| }, | |
| { | |
| "epoch": 4.118257261410788, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.3422811614812085e-06, | |
| "loss": 0.0, | |
| "step": 1985 | |
| }, | |
| { | |
| "epoch": 4.1203319502074685, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.30886153292044e-06, | |
| "loss": 0.0, | |
| "step": 1986 | |
| }, | |
| { | |
| "epoch": 4.122406639004149, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.2755104864330415e-06, | |
| "loss": 0.0, | |
| "step": 1987 | |
| }, | |
| { | |
| "epoch": 4.124481327800829, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.242228091985506e-06, | |
| "loss": 0.0, | |
| "step": 1988 | |
| }, | |
| { | |
| "epoch": 4.12655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.209014419400269e-06, | |
| "loss": 0.0, | |
| "step": 1989 | |
| }, | |
| { | |
| "epoch": 4.12863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.175869538355629e-06, | |
| "loss": 0.0, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 4.130705394190872, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.1427935183855694e-06, | |
| "loss": 0.0, | |
| "step": 1991 | |
| }, | |
| { | |
| "epoch": 4.132780082987552, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.109786428879575e-06, | |
| "loss": 0.0, | |
| "step": 1992 | |
| }, | |
| { | |
| "epoch": 4.134854771784233, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.0768483390825585e-06, | |
| "loss": 0.0, | |
| "step": 1993 | |
| }, | |
| { | |
| "epoch": 4.136929460580913, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.043979318094676e-06, | |
| "loss": 0.0, | |
| "step": 1994 | |
| }, | |
| { | |
| "epoch": 4.139004149377594, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.01117943487116e-06, | |
| "loss": 0.0, | |
| "step": 1995 | |
| }, | |
| { | |
| "epoch": 4.141078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.9784487582222225e-06, | |
| "loss": 0.0, | |
| "step": 1996 | |
| }, | |
| { | |
| "epoch": 4.143153526970955, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.945787356812892e-06, | |
| "loss": 0.0, | |
| "step": 1997 | |
| }, | |
| { | |
| "epoch": 4.145228215767635, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.913195299162847e-06, | |
| "loss": 0.0, | |
| "step": 1998 | |
| }, | |
| { | |
| "epoch": 4.147302904564316, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.880672653646301e-06, | |
| "loss": 0.0, | |
| "step": 1999 | |
| }, | |
| { | |
| "epoch": 4.149377593360996, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.848219488491863e-06, | |
| "loss": 0.0, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 4.151452282157677, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.815835871782348e-06, | |
| "loss": 0.0, | |
| "step": 2001 | |
| }, | |
| { | |
| "epoch": 4.153526970954357, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.783521871454697e-06, | |
| "loss": 0.0, | |
| "step": 2002 | |
| }, | |
| { | |
| "epoch": 4.155601659751038, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.751277555299798e-06, | |
| "loss": 0.0, | |
| "step": 2003 | |
| }, | |
| { | |
| "epoch": 4.157676348547718, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.7191029909623275e-06, | |
| "loss": 0.0, | |
| "step": 2004 | |
| }, | |
| { | |
| "epoch": 4.159751037344399, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.686998245940661e-06, | |
| "loss": 0.0, | |
| "step": 2005 | |
| }, | |
| { | |
| "epoch": 4.161825726141079, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.654963387586688e-06, | |
| "loss": 0.0, | |
| "step": 2006 | |
| }, | |
| { | |
| "epoch": 4.16390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.622998483105694e-06, | |
| "loss": 0.0, | |
| "step": 2007 | |
| }, | |
| { | |
| "epoch": 4.16597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.591103599556179e-06, | |
| "loss": 0.0, | |
| "step": 2008 | |
| }, | |
| { | |
| "epoch": 4.168049792531121, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.55927880384978e-06, | |
| "loss": 0.0, | |
| "step": 2009 | |
| }, | |
| { | |
| "epoch": 4.170124481327801, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.5275241627511e-06, | |
| "loss": 0.0, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 4.172199170124482, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.495839742877535e-06, | |
| "loss": 0.0, | |
| "step": 2011 | |
| }, | |
| { | |
| "epoch": 4.174273858921162, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.464225610699193e-06, | |
| "loss": 0.0, | |
| "step": 2012 | |
| }, | |
| { | |
| "epoch": 4.176348547717843, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.432681832538721e-06, | |
| "loss": 0.0, | |
| "step": 2013 | |
| }, | |
| { | |
| "epoch": 4.178423236514523, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.401208474571157e-06, | |
| "loss": 0.0, | |
| "step": 2014 | |
| }, | |
| { | |
| "epoch": 4.180497925311204, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.369805602823822e-06, | |
| "loss": 0.0, | |
| "step": 2015 | |
| }, | |
| { | |
| "epoch": 4.182572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.338473283176179e-06, | |
| "loss": 0.0, | |
| "step": 2016 | |
| }, | |
| { | |
| "epoch": 4.1846473029045645, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.307211581359633e-06, | |
| "loss": 0.0, | |
| "step": 2017 | |
| }, | |
| { | |
| "epoch": 4.186721991701245, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.276020562957486e-06, | |
| "loss": 0.0, | |
| "step": 2018 | |
| }, | |
| { | |
| "epoch": 4.1887966804979255, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.244900293404747e-06, | |
| "loss": 0.0, | |
| "step": 2019 | |
| }, | |
| { | |
| "epoch": 4.190871369294606, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.213850837987983e-06, | |
| "loss": 0.0, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 4.1929460580912865, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.182872261845218e-06, | |
| "loss": 0.0, | |
| "step": 2021 | |
| }, | |
| { | |
| "epoch": 4.195020746887967, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.151964629965785e-06, | |
| "loss": 0.0, | |
| "step": 2022 | |
| }, | |
| { | |
| "epoch": 4.1970954356846475, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.121128007190158e-06, | |
| "loss": 0.0, | |
| "step": 2023 | |
| }, | |
| { | |
| "epoch": 4.199170124481328, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.090362458209869e-06, | |
| "loss": 0.0, | |
| "step": 2024 | |
| }, | |
| { | |
| "epoch": 4.2012448132780085, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.059668047567337e-06, | |
| "loss": 0.0, | |
| "step": 2025 | |
| }, | |
| { | |
| "epoch": 4.203319502074689, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.029044839655749e-06, | |
| "loss": 0.0, | |
| "step": 2026 | |
| }, | |
| { | |
| "epoch": 4.2053941908713695, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.998492898718891e-06, | |
| "loss": 0.0, | |
| "step": 2027 | |
| }, | |
| { | |
| "epoch": 4.20746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.9680122888510665e-06, | |
| "loss": 0.0, | |
| "step": 2028 | |
| }, | |
| { | |
| "epoch": 4.20954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.937603073996933e-06, | |
| "loss": 0.0, | |
| "step": 2029 | |
| }, | |
| { | |
| "epoch": 4.211618257261411, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.907265317951347e-06, | |
| "loss": 0.0, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 4.213692946058091, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.87699908435928e-06, | |
| "loss": 0.0, | |
| "step": 2031 | |
| }, | |
| { | |
| "epoch": 4.215767634854772, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.846804436715654e-06, | |
| "loss": 0.0, | |
| "step": 2032 | |
| }, | |
| { | |
| "epoch": 4.217842323651452, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.816681438365183e-06, | |
| "loss": 0.0, | |
| "step": 2033 | |
| }, | |
| { | |
| "epoch": 4.219917012448133, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.786630152502306e-06, | |
| "loss": 0.0, | |
| "step": 2034 | |
| }, | |
| { | |
| "epoch": 4.221991701244813, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.756650642171009e-06, | |
| "loss": 0.0, | |
| "step": 2035 | |
| }, | |
| { | |
| "epoch": 4.224066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.72674297026468e-06, | |
| "loss": 0.0, | |
| "step": 2036 | |
| }, | |
| { | |
| "epoch": 4.226141078838174, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.696907199526025e-06, | |
| "loss": 0.0, | |
| "step": 2037 | |
| }, | |
| { | |
| "epoch": 4.228215767634855, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.667143392546895e-06, | |
| "loss": 0.0, | |
| "step": 2038 | |
| }, | |
| { | |
| "epoch": 4.230290456431535, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.637451611768181e-06, | |
| "loss": 0.0, | |
| "step": 2039 | |
| }, | |
| { | |
| "epoch": 4.232365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.607831919479654e-06, | |
| "loss": 0.0, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 4.234439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.578284377819882e-06, | |
| "loss": 0.0, | |
| "step": 2041 | |
| }, | |
| { | |
| "epoch": 4.236514522821577, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.548809048776034e-06, | |
| "loss": 0.0, | |
| "step": 2042 | |
| }, | |
| { | |
| "epoch": 4.238589211618257, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.5194059941838e-06, | |
| "loss": 0.0, | |
| "step": 2043 | |
| }, | |
| { | |
| "epoch": 4.240663900414938, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.490075275727269e-06, | |
| "loss": 0.0, | |
| "step": 2044 | |
| }, | |
| { | |
| "epoch": 4.242738589211618, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4608169549387325e-06, | |
| "loss": 0.0, | |
| "step": 2045 | |
| }, | |
| { | |
| "epoch": 4.244813278008299, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.431631093198642e-06, | |
| "loss": 0.0, | |
| "step": 2046 | |
| }, | |
| { | |
| "epoch": 4.246887966804979, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.402517751735423e-06, | |
| "loss": 0.0, | |
| "step": 2047 | |
| }, | |
| { | |
| "epoch": 4.24896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.37347699162535e-06, | |
| "loss": 0.0, | |
| "step": 2048 | |
| }, | |
| { | |
| "epoch": 4.25103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.344508873792449e-06, | |
| "loss": 0.0, | |
| "step": 2049 | |
| }, | |
| { | |
| "epoch": 4.253112033195021, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.315613459008351e-06, | |
| "loss": 0.0, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 4.255186721991701, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.2867908078921435e-06, | |
| "loss": 0.0, | |
| "step": 2051 | |
| }, | |
| { | |
| "epoch": 4.257261410788382, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.258040980910282e-06, | |
| "loss": 0.0, | |
| "step": 2052 | |
| }, | |
| { | |
| "epoch": 4.259336099585062, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.229364038376452e-06, | |
| "loss": 0.0, | |
| "step": 2053 | |
| }, | |
| { | |
| "epoch": 4.261410788381743, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.200760040451429e-06, | |
| "loss": 0.0, | |
| "step": 2054 | |
| }, | |
| { | |
| "epoch": 4.263485477178423, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.1722290471429404e-06, | |
| "loss": 0.0, | |
| "step": 2055 | |
| }, | |
| { | |
| "epoch": 4.265560165975104, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.143771118305587e-06, | |
| "loss": 0.0, | |
| "step": 2056 | |
| }, | |
| { | |
| "epoch": 4.267634854771784, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.115386313640685e-06, | |
| "loss": 0.0, | |
| "step": 2057 | |
| }, | |
| { | |
| "epoch": 4.269709543568465, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.087074692696123e-06, | |
| "loss": 0.0, | |
| "step": 2058 | |
| }, | |
| { | |
| "epoch": 4.271784232365145, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.0588363148662775e-06, | |
| "loss": 0.0, | |
| "step": 2059 | |
| }, | |
| { | |
| "epoch": 4.273858921161826, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.03067123939188e-06, | |
| "loss": 0.0, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 4.275933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.002579525359848e-06, | |
| "loss": 0.0, | |
| "step": 2061 | |
| }, | |
| { | |
| "epoch": 4.278008298755187, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.974561231703226e-06, | |
| "loss": 0.0, | |
| "step": 2062 | |
| }, | |
| { | |
| "epoch": 4.280082987551867, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.946616417201031e-06, | |
| "loss": 0.0, | |
| "step": 2063 | |
| }, | |
| { | |
| "epoch": 4.282157676348548, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.918745140478098e-06, | |
| "loss": 0.0, | |
| "step": 2064 | |
| }, | |
| { | |
| "epoch": 4.284232365145228, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.890947460005029e-06, | |
| "loss": 0.0, | |
| "step": 2065 | |
| }, | |
| { | |
| "epoch": 4.286307053941909, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.8632234340980055e-06, | |
| "loss": 0.0, | |
| "step": 2066 | |
| }, | |
| { | |
| "epoch": 4.288381742738589, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.835573120918695e-06, | |
| "loss": 0.0, | |
| "step": 2067 | |
| }, | |
| { | |
| "epoch": 4.29045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.807996578474124e-06, | |
| "loss": 0.0, | |
| "step": 2068 | |
| }, | |
| { | |
| "epoch": 4.29253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.780493864616569e-06, | |
| "loss": 0.0, | |
| "step": 2069 | |
| }, | |
| { | |
| "epoch": 4.2946058091286305, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.753065037043403e-06, | |
| "loss": 0.0, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 4.296680497925311, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.725710153297005e-06, | |
| "loss": 0.0, | |
| "step": 2071 | |
| }, | |
| { | |
| "epoch": 4.2987551867219915, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.698429270764635e-06, | |
| "loss": 0.0, | |
| "step": 2072 | |
| }, | |
| { | |
| "epoch": 4.300829875518672, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.671222446678303e-06, | |
| "loss": 0.0, | |
| "step": 2073 | |
| }, | |
| { | |
| "epoch": 4.3029045643153525, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.6440897381146366e-06, | |
| "loss": 0.0, | |
| "step": 2074 | |
| }, | |
| { | |
| "epoch": 4.304979253112033, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.617031201994801e-06, | |
| "loss": 0.0, | |
| "step": 2075 | |
| }, | |
| { | |
| "epoch": 4.3070539419087135, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.590046895084359e-06, | |
| "loss": 0.0, | |
| "step": 2076 | |
| }, | |
| { | |
| "epoch": 4.309128630705394, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.563136873993115e-06, | |
| "loss": 0.0, | |
| "step": 2077 | |
| }, | |
| { | |
| "epoch": 4.3112033195020745, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.536301195175066e-06, | |
| "loss": 0.0, | |
| "step": 2078 | |
| }, | |
| { | |
| "epoch": 4.313278008298755, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.509539914928249e-06, | |
| "loss": 0.0, | |
| "step": 2079 | |
| }, | |
| { | |
| "epoch": 4.3153526970954355, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.482853089394583e-06, | |
| "loss": 0.0, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 4.317427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.45624077455983e-06, | |
| "loss": 0.0, | |
| "step": 2081 | |
| }, | |
| { | |
| "epoch": 4.319502074688796, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.429703026253429e-06, | |
| "loss": 0.0, | |
| "step": 2082 | |
| }, | |
| { | |
| "epoch": 4.321576763485477, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.403239900148366e-06, | |
| "loss": 0.0, | |
| "step": 2083 | |
| }, | |
| { | |
| "epoch": 4.323651452282157, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.3768514517610995e-06, | |
| "loss": 0.0, | |
| "step": 2084 | |
| }, | |
| { | |
| "epoch": 4.325726141078838, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.35053773645143e-06, | |
| "loss": 0.0, | |
| "step": 2085 | |
| }, | |
| { | |
| "epoch": 4.327800829875518, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.3242988094223514e-06, | |
| "loss": 0.0, | |
| "step": 2086 | |
| }, | |
| { | |
| "epoch": 4.329875518672199, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.298134725719974e-06, | |
| "loss": 0.0, | |
| "step": 2087 | |
| }, | |
| { | |
| "epoch": 4.331950207468879, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.272045540233407e-06, | |
| "loss": 0.0, | |
| "step": 2088 | |
| }, | |
| { | |
| "epoch": 4.33402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.246031307694605e-06, | |
| "loss": 0.0, | |
| "step": 2089 | |
| }, | |
| { | |
| "epoch": 4.33609958506224, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.220092082678307e-06, | |
| "loss": 0.0, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 4.338174273858921, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.194227919601881e-06, | |
| "loss": 0.0, | |
| "step": 2091 | |
| }, | |
| { | |
| "epoch": 4.340248962655601, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.16843887272524e-06, | |
| "loss": 0.0, | |
| "step": 2092 | |
| }, | |
| { | |
| "epoch": 4.342323651452282, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.14272499615068e-06, | |
| "loss": 0.0, | |
| "step": 2093 | |
| }, | |
| { | |
| "epoch": 4.344398340248962, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.1170863438228316e-06, | |
| "loss": 0.0, | |
| "step": 2094 | |
| }, | |
| { | |
| "epoch": 4.346473029045643, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.091522969528505e-06, | |
| "loss": 0.0, | |
| "step": 2095 | |
| }, | |
| { | |
| "epoch": 4.348547717842323, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.066034926896567e-06, | |
| "loss": 0.0, | |
| "step": 2096 | |
| }, | |
| { | |
| "epoch": 4.350622406639004, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.040622269397871e-06, | |
| "loss": 0.0, | |
| "step": 2097 | |
| }, | |
| { | |
| "epoch": 4.352697095435684, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.0152850503451194e-06, | |
| "loss": 0.0, | |
| "step": 2098 | |
| }, | |
| { | |
| "epoch": 4.354771784232365, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.990023322892738e-06, | |
| "loss": 0.0, | |
| "step": 2099 | |
| }, | |
| { | |
| "epoch": 4.356846473029045, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.964837140036793e-06, | |
| "loss": 0.0, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 4.358921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.939726554614876e-06, | |
| "loss": 0.0, | |
| "step": 2101 | |
| }, | |
| { | |
| "epoch": 4.360995850622406, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.91469161930595e-06, | |
| "loss": 0.0, | |
| "step": 2102 | |
| }, | |
| { | |
| "epoch": 4.363070539419087, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.889732386630312e-06, | |
| "loss": 0.0, | |
| "step": 2103 | |
| }, | |
| { | |
| "epoch": 4.365145228215767, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.86484890894943e-06, | |
| "loss": 0.0, | |
| "step": 2104 | |
| }, | |
| { | |
| "epoch": 4.367219917012449, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.840041238465824e-06, | |
| "loss": 0.0, | |
| "step": 2105 | |
| }, | |
| { | |
| "epoch": 4.369294605809129, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.8153094272230264e-06, | |
| "loss": 0.0, | |
| "step": 2106 | |
| }, | |
| { | |
| "epoch": 4.37136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7906535271053926e-06, | |
| "loss": 0.0, | |
| "step": 2107 | |
| }, | |
| { | |
| "epoch": 4.37344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.766073589838022e-06, | |
| "loss": 0.0, | |
| "step": 2108 | |
| }, | |
| { | |
| "epoch": 4.375518672199171, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7415696669866707e-06, | |
| "loss": 0.0, | |
| "step": 2109 | |
| }, | |
| { | |
| "epoch": 4.377593360995851, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7171418099576274e-06, | |
| "loss": 0.0, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 4.3796680497925315, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.692790069997583e-06, | |
| "loss": 0.0, | |
| "step": 2111 | |
| }, | |
| { | |
| "epoch": 4.381742738589212, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.668514498193565e-06, | |
| "loss": 0.0, | |
| "step": 2112 | |
| }, | |
| { | |
| "epoch": 4.3838174273858925, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.644315145472801e-06, | |
| "loss": 0.0, | |
| "step": 2113 | |
| }, | |
| { | |
| "epoch": 4.385892116182573, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.6201920626026143e-06, | |
| "loss": 0.0, | |
| "step": 2114 | |
| }, | |
| { | |
| "epoch": 4.3879668049792535, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.596145300190328e-06, | |
| "loss": 0.0, | |
| "step": 2115 | |
| }, | |
| { | |
| "epoch": 4.390041493775934, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.572174908683166e-06, | |
| "loss": 0.0, | |
| "step": 2116 | |
| }, | |
| { | |
| "epoch": 4.3921161825726145, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5482809383681117e-06, | |
| "loss": 0.0, | |
| "step": 2117 | |
| }, | |
| { | |
| "epoch": 4.394190871369295, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5244634393718457e-06, | |
| "loss": 0.0, | |
| "step": 2118 | |
| }, | |
| { | |
| "epoch": 4.3962655601659755, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5007224616606085e-06, | |
| "loss": 0.0, | |
| "step": 2119 | |
| }, | |
| { | |
| "epoch": 4.398340248962656, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.477058055040128e-06, | |
| "loss": 0.0, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 4.4004149377593365, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.453470269155461e-06, | |
| "loss": 0.0, | |
| "step": 2121 | |
| }, | |
| { | |
| "epoch": 4.402489626556017, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4299591534909536e-06, | |
| "loss": 0.0, | |
| "step": 2122 | |
| }, | |
| { | |
| "epoch": 4.404564315352697, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4065247573700977e-06, | |
| "loss": 0.0, | |
| "step": 2123 | |
| }, | |
| { | |
| "epoch": 4.406639004149378, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.383167129955429e-06, | |
| "loss": 0.0, | |
| "step": 2124 | |
| }, | |
| { | |
| "epoch": 4.408713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3598863202484442e-06, | |
| "loss": 0.0, | |
| "step": 2125 | |
| }, | |
| { | |
| "epoch": 4.410788381742739, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.336682377089484e-06, | |
| "loss": 0.0, | |
| "step": 2126 | |
| }, | |
| { | |
| "epoch": 4.412863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.3135553491576087e-06, | |
| "loss": 0.0, | |
| "step": 2127 | |
| }, | |
| { | |
| "epoch": 4.4149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2905052849705555e-06, | |
| "loss": 0.0, | |
| "step": 2128 | |
| }, | |
| { | |
| "epoch": 4.41701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.267532232884589e-06, | |
| "loss": 0.0, | |
| "step": 2129 | |
| }, | |
| { | |
| "epoch": 4.419087136929461, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.24463624109439e-06, | |
| "loss": 0.0, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 4.421161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2218173576330012e-06, | |
| "loss": 0.0, | |
| "step": 2131 | |
| }, | |
| { | |
| "epoch": 4.423236514522822, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.1990756303717042e-06, | |
| "loss": 0.0, | |
| "step": 2132 | |
| }, | |
| { | |
| "epoch": 4.425311203319502, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.1764111070198857e-06, | |
| "loss": 0.0, | |
| "step": 2133 | |
| }, | |
| { | |
| "epoch": 4.427385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.1538238351250004e-06, | |
| "loss": 0.0, | |
| "step": 2134 | |
| }, | |
| { | |
| "epoch": 4.429460580912863, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.131313862072425e-06, | |
| "loss": 0.0, | |
| "step": 2135 | |
| }, | |
| { | |
| "epoch": 4.431535269709544, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.108881235085366e-06, | |
| "loss": 0.0, | |
| "step": 2136 | |
| }, | |
| { | |
| "epoch": 4.433609958506224, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.086526001224779e-06, | |
| "loss": 0.0, | |
| "step": 2137 | |
| }, | |
| { | |
| "epoch": 4.435684647302905, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0642482073892556e-06, | |
| "loss": 0.0, | |
| "step": 2138 | |
| }, | |
| { | |
| "epoch": 4.437759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.042047900314926e-06, | |
| "loss": 0.0, | |
| "step": 2139 | |
| }, | |
| { | |
| "epoch": 4.439834024896266, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.019925126575358e-06, | |
| "loss": 0.0, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 4.441908713692946, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.997879932581471e-06, | |
| "loss": 0.0, | |
| "step": 2141 | |
| }, | |
| { | |
| "epoch": 4.443983402489627, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9759123645814302e-06, | |
| "loss": 0.0, | |
| "step": 2142 | |
| }, | |
| { | |
| "epoch": 4.446058091286307, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.95402246866054e-06, | |
| "loss": 0.0, | |
| "step": 2143 | |
| }, | |
| { | |
| "epoch": 4.448132780082988, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9322102907411775e-06, | |
| "loss": 0.0, | |
| "step": 2144 | |
| }, | |
| { | |
| "epoch": 4.450207468879668, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.9104758765826635e-06, | |
| "loss": 0.0, | |
| "step": 2145 | |
| }, | |
| { | |
| "epoch": 4.452282157676349, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.888819271781178e-06, | |
| "loss": 0.0, | |
| "step": 2146 | |
| }, | |
| { | |
| "epoch": 4.454356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8672405217696766e-06, | |
| "loss": 0.0, | |
| "step": 2147 | |
| }, | |
| { | |
| "epoch": 4.45643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.845739671817778e-06, | |
| "loss": 0.0, | |
| "step": 2148 | |
| }, | |
| { | |
| "epoch": 4.45850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.824316767031672e-06, | |
| "loss": 0.0, | |
| "step": 2149 | |
| }, | |
| { | |
| "epoch": 4.460580912863071, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8029718523540395e-06, | |
| "loss": 0.0, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 4.462655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.78170497256395e-06, | |
| "loss": 0.0, | |
| "step": 2151 | |
| }, | |
| { | |
| "epoch": 4.464730290456432, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.760516172276737e-06, | |
| "loss": 0.0, | |
| "step": 2152 | |
| }, | |
| { | |
| "epoch": 4.466804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7394054959439675e-06, | |
| "loss": 0.0, | |
| "step": 2153 | |
| }, | |
| { | |
| "epoch": 4.468879668049793, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.7183729878532993e-06, | |
| "loss": 0.0, | |
| "step": 2154 | |
| }, | |
| { | |
| "epoch": 4.470954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6974186921283973e-06, | |
| "loss": 0.0, | |
| "step": 2155 | |
| }, | |
| { | |
| "epoch": 4.473029045643154, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.676542652728853e-06, | |
| "loss": 0.0, | |
| "step": 2156 | |
| }, | |
| { | |
| "epoch": 4.475103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6557449134500824e-06, | |
| "loss": 0.0, | |
| "step": 2157 | |
| }, | |
| { | |
| "epoch": 4.477178423236515, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.635025517923251e-06, | |
| "loss": 0.0, | |
| "step": 2158 | |
| }, | |
| { | |
| "epoch": 4.479253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6143845096151356e-06, | |
| "loss": 0.0, | |
| "step": 2159 | |
| }, | |
| { | |
| "epoch": 4.481327800829876, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5938219318281023e-06, | |
| "loss": 0.0, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 4.483402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5733378276999686e-06, | |
| "loss": 0.0, | |
| "step": 2161 | |
| }, | |
| { | |
| "epoch": 4.485477178423237, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5529322402039026e-06, | |
| "loss": 0.0, | |
| "step": 2162 | |
| }, | |
| { | |
| "epoch": 4.487551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.5326052121483756e-06, | |
| "loss": 0.0, | |
| "step": 2163 | |
| }, | |
| { | |
| "epoch": 4.4896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.512356786177059e-06, | |
| "loss": 0.0, | |
| "step": 2164 | |
| }, | |
| { | |
| "epoch": 4.491701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.492187004768698e-06, | |
| "loss": 0.0, | |
| "step": 2165 | |
| }, | |
| { | |
| "epoch": 4.4937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.472095910237071e-06, | |
| "loss": 0.0, | |
| "step": 2166 | |
| }, | |
| { | |
| "epoch": 4.495850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4520835447308812e-06, | |
| "loss": 0.0, | |
| "step": 2167 | |
| }, | |
| { | |
| "epoch": 4.4979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4321499502336508e-06, | |
| "loss": 0.0, | |
| "step": 2168 | |
| }, | |
| { | |
| "epoch": 4.5, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4122951685636674e-06, | |
| "loss": 0.0, | |
| "step": 2169 | |
| }, | |
| { | |
| "epoch": 4.5020746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3925192413738784e-06, | |
| "loss": 0.0, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 4.504149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3728222101517907e-06, | |
| "loss": 0.0, | |
| "step": 2171 | |
| }, | |
| { | |
| "epoch": 4.5062240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3532041162194077e-06, | |
| "loss": 0.0, | |
| "step": 2172 | |
| }, | |
| { | |
| "epoch": 4.508298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.333665000733132e-06, | |
| "loss": 0.0, | |
| "step": 2173 | |
| }, | |
| { | |
| "epoch": 4.5103734439834025, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.314204904683668e-06, | |
| "loss": 0.0, | |
| "step": 2174 | |
| }, | |
| { | |
| "epoch": 4.512448132780083, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2948238688959633e-06, | |
| "loss": 0.0, | |
| "step": 2175 | |
| }, | |
| { | |
| "epoch": 4.514522821576763, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.275521934029099e-06, | |
| "loss": 0.0, | |
| "step": 2176 | |
| }, | |
| { | |
| "epoch": 4.516597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2562991405762036e-06, | |
| "loss": 0.0, | |
| "step": 2177 | |
| }, | |
| { | |
| "epoch": 4.518672199170124, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.237155528864383e-06, | |
| "loss": 0.0, | |
| "step": 2178 | |
| }, | |
| { | |
| "epoch": 4.520746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.218091139054641e-06, | |
| "loss": 0.0, | |
| "step": 2179 | |
| }, | |
| { | |
| "epoch": 4.522821576763485, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.199106011141763e-06, | |
| "loss": 0.0, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 4.524896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.180200184954262e-06, | |
| "loss": 0.0, | |
| "step": 2181 | |
| }, | |
| { | |
| "epoch": 4.526970954356846, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.161373700154292e-06, | |
| "loss": 0.0, | |
| "step": 2182 | |
| }, | |
| { | |
| "epoch": 4.529045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.142626596237545e-06, | |
| "loss": 0.0, | |
| "step": 2183 | |
| }, | |
| { | |
| "epoch": 4.531120331950207, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.123958912533195e-06, | |
| "loss": 0.0, | |
| "step": 2184 | |
| }, | |
| { | |
| "epoch": 4.533195020746888, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1053706882037874e-06, | |
| "loss": 0.0, | |
| "step": 2185 | |
| }, | |
| { | |
| "epoch": 4.535269709543568, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0868619622451904e-06, | |
| "loss": 0.0, | |
| "step": 2186 | |
| }, | |
| { | |
| "epoch": 4.537344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0684327734864728e-06, | |
| "loss": 0.0, | |
| "step": 2187 | |
| }, | |
| { | |
| "epoch": 4.539419087136929, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.050083160589855e-06, | |
| "loss": 0.0, | |
| "step": 2188 | |
| }, | |
| { | |
| "epoch": 4.54149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0318131620506287e-06, | |
| "loss": 0.0, | |
| "step": 2189 | |
| }, | |
| { | |
| "epoch": 4.54356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0136228161970316e-06, | |
| "loss": 0.0, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 4.545643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.995512161190236e-06, | |
| "loss": 0.0, | |
| "step": 2191 | |
| }, | |
| { | |
| "epoch": 4.547717842323651, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9774812350242102e-06, | |
| "loss": 0.0, | |
| "step": 2192 | |
| }, | |
| { | |
| "epoch": 4.549792531120332, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.959530075525664e-06, | |
| "loss": 0.0, | |
| "step": 2193 | |
| }, | |
| { | |
| "epoch": 4.551867219917012, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9416587203539714e-06, | |
| "loss": 0.0, | |
| "step": 2194 | |
| }, | |
| { | |
| "epoch": 4.553941908713693, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.923867207001093e-06, | |
| "loss": 0.0, | |
| "step": 2195 | |
| }, | |
| { | |
| "epoch": 4.556016597510373, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.906155572791466e-06, | |
| "loss": 0.0, | |
| "step": 2196 | |
| }, | |
| { | |
| "epoch": 4.558091286307054, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8885238548819762e-06, | |
| "loss": 0.0, | |
| "step": 2197 | |
| }, | |
| { | |
| "epoch": 4.560165975103734, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.870972090261849e-06, | |
| "loss": 0.0, | |
| "step": 2198 | |
| }, | |
| { | |
| "epoch": 4.562240663900415, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8535003157525633e-06, | |
| "loss": 0.0, | |
| "step": 2199 | |
| }, | |
| { | |
| "epoch": 4.564315352697095, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.836108568007804e-06, | |
| "loss": 0.0, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 4.566390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8187968835133717e-06, | |
| "loss": 0.0, | |
| "step": 2201 | |
| }, | |
| { | |
| "epoch": 4.568464730290456, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8015652985870867e-06, | |
| "loss": 0.0, | |
| "step": 2202 | |
| }, | |
| { | |
| "epoch": 4.570539419087137, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7844138493787389e-06, | |
| "loss": 0.0, | |
| "step": 2203 | |
| }, | |
| { | |
| "epoch": 4.572614107883817, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.767342571870012e-06, | |
| "loss": 0.0, | |
| "step": 2204 | |
| }, | |
| { | |
| "epoch": 4.574688796680498, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7503515018743923e-06, | |
| "loss": 0.0, | |
| "step": 2205 | |
| }, | |
| { | |
| "epoch": 4.576763485477178, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7334406750370857e-06, | |
| "loss": 0.0, | |
| "step": 2206 | |
| }, | |
| { | |
| "epoch": 4.578838174273859, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7166101268349855e-06, | |
| "loss": 0.0, | |
| "step": 2207 | |
| }, | |
| { | |
| "epoch": 4.580912863070539, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6998598925765542e-06, | |
| "loss": 0.0, | |
| "step": 2208 | |
| }, | |
| { | |
| "epoch": 4.58298755186722, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6831900074017582e-06, | |
| "loss": 0.0, | |
| "step": 2209 | |
| }, | |
| { | |
| "epoch": 4.5850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.666600506282019e-06, | |
| "loss": 0.0, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 4.587136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6500914240201104e-06, | |
| "loss": 0.0, | |
| "step": 2211 | |
| }, | |
| { | |
| "epoch": 4.589211618257261, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6336627952500928e-06, | |
| "loss": 0.0, | |
| "step": 2212 | |
| }, | |
| { | |
| "epoch": 4.591286307053942, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6173146544372631e-06, | |
| "loss": 0.0, | |
| "step": 2213 | |
| }, | |
| { | |
| "epoch": 4.593360995850622, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6010470358780494e-06, | |
| "loss": 0.0, | |
| "step": 2214 | |
| }, | |
| { | |
| "epoch": 4.595435684647303, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5848599736999482e-06, | |
| "loss": 0.0, | |
| "step": 2215 | |
| }, | |
| { | |
| "epoch": 4.597510373443983, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5687535018614796e-06, | |
| "loss": 0.0, | |
| "step": 2216 | |
| }, | |
| { | |
| "epoch": 4.5995850622406635, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5527276541520775e-06, | |
| "loss": 0.0, | |
| "step": 2217 | |
| }, | |
| { | |
| "epoch": 4.601659751037344, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5367824641920393e-06, | |
| "loss": 0.0, | |
| "step": 2218 | |
| }, | |
| { | |
| "epoch": 4.6037344398340245, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5209179654324558e-06, | |
| "loss": 0.0, | |
| "step": 2219 | |
| }, | |
| { | |
| "epoch": 4.605809128630705, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5051341911551443e-06, | |
| "loss": 0.0, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 4.6078838174273855, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4894311744725464e-06, | |
| "loss": 0.0, | |
| "step": 2221 | |
| }, | |
| { | |
| "epoch": 4.609958506224066, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4738089483277107e-06, | |
| "loss": 0.0, | |
| "step": 2222 | |
| }, | |
| { | |
| "epoch": 4.6120331950207465, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4582675454941897e-06, | |
| "loss": 0.0, | |
| "step": 2223 | |
| }, | |
| { | |
| "epoch": 4.614107883817427, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.442806998575974e-06, | |
| "loss": 0.0, | |
| "step": 2224 | |
| }, | |
| { | |
| "epoch": 4.6161825726141075, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4274273400074257e-06, | |
| "loss": 0.0, | |
| "step": 2225 | |
| }, | |
| { | |
| "epoch": 4.618257261410788, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.41212860205322e-06, | |
| "loss": 0.0, | |
| "step": 2226 | |
| }, | |
| { | |
| "epoch": 4.6203319502074685, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3969108168082702e-06, | |
| "loss": 0.0, | |
| "step": 2227 | |
| }, | |
| { | |
| "epoch": 4.622406639004149, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3817740161976522e-06, | |
| "loss": 0.0, | |
| "step": 2228 | |
| }, | |
| { | |
| "epoch": 4.624481327800829, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3667182319765514e-06, | |
| "loss": 0.0, | |
| "step": 2229 | |
| }, | |
| { | |
| "epoch": 4.62655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3517434957302e-06, | |
| "loss": 0.0, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 4.62863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3368498388737837e-06, | |
| "loss": 0.0, | |
| "step": 2231 | |
| }, | |
| { | |
| "epoch": 4.630705394190871, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3220372926524027e-06, | |
| "loss": 0.0, | |
| "step": 2232 | |
| }, | |
| { | |
| "epoch": 4.632780082987551, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3073058881410038e-06, | |
| "loss": 0.0, | |
| "step": 2233 | |
| }, | |
| { | |
| "epoch": 4.634854771784232, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2926556562442883e-06, | |
| "loss": 0.0, | |
| "step": 2234 | |
| }, | |
| { | |
| "epoch": 4.636929460580912, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2780866276966841e-06, | |
| "loss": 0.0, | |
| "step": 2235 | |
| }, | |
| { | |
| "epoch": 4.639004149377593, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2635988330622673e-06, | |
| "loss": 0.0, | |
| "step": 2236 | |
| }, | |
| { | |
| "epoch": 4.641078838174274, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.249192302734672e-06, | |
| "loss": 0.0, | |
| "step": 2237 | |
| }, | |
| { | |
| "epoch": 4.643153526970955, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2348670669370732e-06, | |
| "loss": 0.0, | |
| "step": 2238 | |
| }, | |
| { | |
| "epoch": 4.645228215767635, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2206231557220937e-06, | |
| "loss": 0.0, | |
| "step": 2239 | |
| }, | |
| { | |
| "epoch": 4.647302904564316, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2064605989717459e-06, | |
| "loss": 0.0, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 4.649377593360996, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.192379426397352e-06, | |
| "loss": 0.0, | |
| "step": 2241 | |
| }, | |
| { | |
| "epoch": 4.651452282157677, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1783796675395442e-06, | |
| "loss": 0.0, | |
| "step": 2242 | |
| }, | |
| { | |
| "epoch": 4.653526970954357, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.164461351768109e-06, | |
| "loss": 0.0, | |
| "step": 2243 | |
| }, | |
| { | |
| "epoch": 4.655601659751038, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1506245082820057e-06, | |
| "loss": 0.0, | |
| "step": 2244 | |
| }, | |
| { | |
| "epoch": 4.657676348547718, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1368691661092713e-06, | |
| "loss": 0.0, | |
| "step": 2245 | |
| }, | |
| { | |
| "epoch": 4.659751037344399, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1231953541069474e-06, | |
| "loss": 0.0, | |
| "step": 2246 | |
| }, | |
| { | |
| "epoch": 4.661825726141079, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.109603100961052e-06, | |
| "loss": 0.0, | |
| "step": 2247 | |
| }, | |
| { | |
| "epoch": 4.66390041493776, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0960924351864999e-06, | |
| "loss": 0.0, | |
| "step": 2248 | |
| }, | |
| { | |
| "epoch": 4.66597510373444, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.082663385127032e-06, | |
| "loss": 0.0, | |
| "step": 2249 | |
| }, | |
| { | |
| "epoch": 4.668049792531121, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0693159789551876e-06, | |
| "loss": 0.0, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 4.670124481327801, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0560502446722088e-06, | |
| "loss": 0.0, | |
| "step": 2251 | |
| }, | |
| { | |
| "epoch": 4.672199170124482, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.04286621010802e-06, | |
| "loss": 0.0, | |
| "step": 2252 | |
| }, | |
| { | |
| "epoch": 4.674273858921162, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0297639029211327e-06, | |
| "loss": 0.0, | |
| "step": 2253 | |
| }, | |
| { | |
| "epoch": 4.676348547717843, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.016743350598608e-06, | |
| "loss": 0.0, | |
| "step": 2254 | |
| }, | |
| { | |
| "epoch": 4.678423236514523, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0038045804560048e-06, | |
| "loss": 0.0, | |
| "step": 2255 | |
| }, | |
| { | |
| "epoch": 4.680497925311204, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.909476196372946e-07, | |
| "loss": 0.0, | |
| "step": 2256 | |
| }, | |
| { | |
| "epoch": 4.682572614107884, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.781724951148442e-07, | |
| "loss": 0.0, | |
| "step": 2257 | |
| }, | |
| { | |
| "epoch": 4.6846473029045645, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.654792336893259e-07, | |
| "loss": 0.0, | |
| "step": 2258 | |
| }, | |
| { | |
| "epoch": 4.686721991701245, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.528678619896659e-07, | |
| "loss": 0.0, | |
| "step": 2259 | |
| }, | |
| { | |
| "epoch": 4.6887966804979255, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.40338406473007e-07, | |
| "loss": 0.0, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 4.690871369294606, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.278908934246477e-07, | |
| "loss": 0.0, | |
| "step": 2261 | |
| }, | |
| { | |
| "epoch": 4.6929460580912865, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.155253489579619e-07, | |
| "loss": 0.0, | |
| "step": 2262 | |
| }, | |
| { | |
| "epoch": 4.695020746887967, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.03241799014376e-07, | |
| "loss": 0.0, | |
| "step": 2263 | |
| }, | |
| { | |
| "epoch": 4.6970954356846475, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.910402693632991e-07, | |
| "loss": 0.0, | |
| "step": 2264 | |
| }, | |
| { | |
| "epoch": 4.699170124481328, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.789207856020732e-07, | |
| "loss": 0.0, | |
| "step": 2265 | |
| }, | |
| { | |
| "epoch": 4.7012448132780085, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.668833731559156e-07, | |
| "loss": 0.0, | |
| "step": 2266 | |
| }, | |
| { | |
| "epoch": 4.703319502074689, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.549280572778706e-07, | |
| "loss": 0.0, | |
| "step": 2267 | |
| }, | |
| { | |
| "epoch": 4.7053941908713695, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.430548630487467e-07, | |
| "loss": 0.0, | |
| "step": 2268 | |
| }, | |
| { | |
| "epoch": 4.70746887966805, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.312638153770813e-07, | |
| "loss": 0.0, | |
| "step": 2269 | |
| }, | |
| { | |
| "epoch": 4.70954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.195549389990698e-07, | |
| "loss": 0.0, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 4.711618257261411, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.079282584785297e-07, | |
| "loss": 0.0, | |
| "step": 2271 | |
| }, | |
| { | |
| "epoch": 4.713692946058091, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.963837982068301e-07, | |
| "loss": 0.0, | |
| "step": 2272 | |
| }, | |
| { | |
| "epoch": 4.715767634854772, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.849215824028645e-07, | |
| "loss": 0.0, | |
| "step": 2273 | |
| }, | |
| { | |
| "epoch": 4.717842323651452, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.735416351129798e-07, | |
| "loss": 0.0, | |
| "step": 2274 | |
| }, | |
| { | |
| "epoch": 4.719917012448133, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.622439802109371e-07, | |
| "loss": 0.0, | |
| "step": 2275 | |
| }, | |
| { | |
| "epoch": 4.721991701244813, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.510286413978573e-07, | |
| "loss": 0.0, | |
| "step": 2276 | |
| }, | |
| { | |
| "epoch": 4.724066390041494, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.398956422021686e-07, | |
| "loss": 0.0, | |
| "step": 2277 | |
| }, | |
| { | |
| "epoch": 4.726141078838174, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.288450059795615e-07, | |
| "loss": 0.0, | |
| "step": 2278 | |
| }, | |
| { | |
| "epoch": 4.728215767634855, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.178767559129451e-07, | |
| "loss": 0.0, | |
| "step": 2279 | |
| }, | |
| { | |
| "epoch": 4.730290456431535, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.069909150123889e-07, | |
| "loss": 0.0, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 4.732365145228216, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.961875061150691e-07, | |
| "loss": 0.0, | |
| "step": 2281 | |
| }, | |
| { | |
| "epoch": 4.734439834024896, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.854665518852433e-07, | |
| "loss": 0.0, | |
| "step": 2282 | |
| }, | |
| { | |
| "epoch": 4.736514522821577, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.748280748141822e-07, | |
| "loss": 0.0, | |
| "step": 2283 | |
| }, | |
| { | |
| "epoch": 4.738589211618257, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.642720972201266e-07, | |
| "loss": 0.0, | |
| "step": 2284 | |
| }, | |
| { | |
| "epoch": 4.740663900414938, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.537986412482511e-07, | |
| "loss": 0.0, | |
| "step": 2285 | |
| }, | |
| { | |
| "epoch": 4.742738589211618, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.434077288706064e-07, | |
| "loss": 0.0, | |
| "step": 2286 | |
| }, | |
| { | |
| "epoch": 4.744813278008299, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.330993818860709e-07, | |
| "loss": 0.0, | |
| "step": 2287 | |
| }, | |
| { | |
| "epoch": 4.746887966804979, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.228736219203191e-07, | |
| "loss": 0.0, | |
| "step": 2288 | |
| }, | |
| { | |
| "epoch": 4.74896265560166, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.127304704257641e-07, | |
| "loss": 0.0, | |
| "step": 2289 | |
| }, | |
| { | |
| "epoch": 4.75103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.02669948681518e-07, | |
| "loss": 0.0, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 4.753112033195021, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.926920777933421e-07, | |
| "loss": 0.0, | |
| "step": 2291 | |
| }, | |
| { | |
| "epoch": 4.755186721991701, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.827968786936034e-07, | |
| "loss": 0.0, | |
| "step": 2292 | |
| }, | |
| { | |
| "epoch": 4.757261410788382, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.72984372141252e-07, | |
| "loss": 0.0, | |
| "step": 2293 | |
| }, | |
| { | |
| "epoch": 4.759336099585062, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.632545787217369e-07, | |
| "loss": 0.0, | |
| "step": 2294 | |
| }, | |
| { | |
| "epoch": 4.761410788381743, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.536075188469881e-07, | |
| "loss": 0.0, | |
| "step": 2295 | |
| }, | |
| { | |
| "epoch": 4.763485477178423, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.440432127553896e-07, | |
| "loss": 0.0, | |
| "step": 2296 | |
| }, | |
| { | |
| "epoch": 4.765560165975104, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.34561680511696e-07, | |
| "loss": 0.0, | |
| "step": 2297 | |
| }, | |
| { | |
| "epoch": 4.767634854771784, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.251629420070181e-07, | |
| "loss": 0.0, | |
| "step": 2298 | |
| }, | |
| { | |
| "epoch": 4.769709543568465, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.158470169587837e-07, | |
| "loss": 0.0, | |
| "step": 2299 | |
| }, | |
| { | |
| "epoch": 4.771784232365145, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.066139249106794e-07, | |
| "loss": 0.0, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 4.773858921161826, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.974636852326198e-07, | |
| "loss": 0.0, | |
| "step": 2301 | |
| }, | |
| { | |
| "epoch": 4.775933609958506, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.883963171207118e-07, | |
| "loss": 0.0, | |
| "step": 2302 | |
| }, | |
| { | |
| "epoch": 4.778008298755187, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.794118395971925e-07, | |
| "loss": 0.0, | |
| "step": 2303 | |
| }, | |
| { | |
| "epoch": 4.780082987551867, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.705102715104204e-07, | |
| "loss": 0.0, | |
| "step": 2304 | |
| }, | |
| { | |
| "epoch": 4.782157676348548, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.616916315348219e-07, | |
| "loss": 0.0, | |
| "step": 2305 | |
| }, | |
| { | |
| "epoch": 4.784232365145228, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.529559381708248e-07, | |
| "loss": 0.0, | |
| "step": 2306 | |
| }, | |
| { | |
| "epoch": 4.786307053941909, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.443032097448763e-07, | |
| "loss": 0.0, | |
| "step": 2307 | |
| }, | |
| { | |
| "epoch": 4.788381742738589, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.357334644093492e-07, | |
| "loss": 0.0, | |
| "step": 2308 | |
| }, | |
| { | |
| "epoch": 4.79045643153527, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.272467201425468e-07, | |
| "loss": 0.0, | |
| "step": 2309 | |
| }, | |
| { | |
| "epoch": 4.79253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.188429947486272e-07, | |
| "loss": 0.0, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 4.7946058091286305, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.10522305857608e-07, | |
| "loss": 0.0, | |
| "step": 2311 | |
| }, | |
| { | |
| "epoch": 4.796680497925311, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.0228467092528145e-07, | |
| "loss": 0.0, | |
| "step": 2312 | |
| }, | |
| { | |
| "epoch": 4.7987551867219915, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.9413010723321487e-07, | |
| "loss": 0.0, | |
| "step": 2313 | |
| }, | |
| { | |
| "epoch": 4.800829875518672, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.860586318887061e-07, | |
| "loss": 0.0, | |
| "step": 2314 | |
| }, | |
| { | |
| "epoch": 4.8029045643153525, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.780702618247389e-07, | |
| "loss": 0.0, | |
| "step": 2315 | |
| }, | |
| { | |
| "epoch": 4.804979253112033, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.70165013799948e-07, | |
| "loss": 0.0, | |
| "step": 2316 | |
| }, | |
| { | |
| "epoch": 4.8070539419087135, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.623429043985915e-07, | |
| "loss": 0.0, | |
| "step": 2317 | |
| }, | |
| { | |
| "epoch": 4.809128630705394, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.5460395003052097e-07, | |
| "loss": 0.0, | |
| "step": 2318 | |
| }, | |
| { | |
| "epoch": 4.8112033195020745, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.4694816693112255e-07, | |
| "loss": 0.0, | |
| "step": 2319 | |
| }, | |
| { | |
| "epoch": 4.813278008298755, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.393755711613134e-07, | |
| "loss": 0.0, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 4.8153526970954355, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.318861786074834e-07, | |
| "loss": 0.0, | |
| "step": 2321 | |
| }, | |
| { | |
| "epoch": 4.817427385892116, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.2448000498148224e-07, | |
| "loss": 0.0, | |
| "step": 2322 | |
| }, | |
| { | |
| "epoch": 4.819502074688796, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.171570658205614e-07, | |
| "loss": 0.0, | |
| "step": 2323 | |
| }, | |
| { | |
| "epoch": 4.821576763485477, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0991737648737865e-07, | |
| "loss": 0.0, | |
| "step": 2324 | |
| }, | |
| { | |
| "epoch": 4.823651452282157, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.0276095216992265e-07, | |
| "loss": 0.0, | |
| "step": 2325 | |
| }, | |
| { | |
| "epoch": 4.825726141078838, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.956878078815084e-07, | |
| "loss": 0.0, | |
| "step": 2326 | |
| }, | |
| { | |
| "epoch": 4.827800829875518, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.886979584607463e-07, | |
| "loss": 0.0, | |
| "step": 2327 | |
| }, | |
| { | |
| "epoch": 4.829875518672199, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.817914185714932e-07, | |
| "loss": 0.0, | |
| "step": 2328 | |
| }, | |
| { | |
| "epoch": 4.831950207468879, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.749682027028433e-07, | |
| "loss": 0.0, | |
| "step": 2329 | |
| }, | |
| { | |
| "epoch": 4.83402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6822832516908427e-07, | |
| "loss": 0.0, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 4.83609958506224, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.615718001096612e-07, | |
| "loss": 0.0, | |
| "step": 2331 | |
| }, | |
| { | |
| "epoch": 4.838174273858921, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.549986414891681e-07, | |
| "loss": 0.0, | |
| "step": 2332 | |
| }, | |
| { | |
| "epoch": 4.840248962655601, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.485088630972987e-07, | |
| "loss": 0.0, | |
| "step": 2333 | |
| }, | |
| { | |
| "epoch": 4.842323651452282, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4210247854882464e-07, | |
| "loss": 0.0, | |
| "step": 2334 | |
| }, | |
| { | |
| "epoch": 4.844398340248962, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.3577950128357284e-07, | |
| "loss": 0.0, | |
| "step": 2335 | |
| }, | |
| { | |
| "epoch": 4.846473029045643, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2953994456638596e-07, | |
| "loss": 0.0, | |
| "step": 2336 | |
| }, | |
| { | |
| "epoch": 4.848547717842323, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2338382148710424e-07, | |
| "loss": 0.0, | |
| "step": 2337 | |
| }, | |
| { | |
| "epoch": 4.850622406639004, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.173111449605303e-07, | |
| "loss": 0.0, | |
| "step": 2338 | |
| }, | |
| { | |
| "epoch": 4.852697095435684, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.1132192772641113e-07, | |
| "loss": 0.0, | |
| "step": 2339 | |
| }, | |
| { | |
| "epoch": 4.854771784232365, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.054161823494072e-07, | |
| "loss": 0.0, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 4.856846473029045, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.995939212190523e-07, | |
| "loss": 0.0, | |
| "step": 2341 | |
| }, | |
| { | |
| "epoch": 4.858921161825726, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.9385515654975818e-07, | |
| "loss": 0.0, | |
| "step": 2342 | |
| }, | |
| { | |
| "epoch": 4.860995850622407, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8819990038075665e-07, | |
| "loss": 0.0, | |
| "step": 2343 | |
| }, | |
| { | |
| "epoch": 4.863070539419088, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.8262816457609522e-07, | |
| "loss": 0.0, | |
| "step": 2344 | |
| }, | |
| { | |
| "epoch": 4.865145228215768, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7713996082460605e-07, | |
| "loss": 0.0, | |
| "step": 2345 | |
| }, | |
| { | |
| "epoch": 4.867219917012449, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.7173530063988363e-07, | |
| "loss": 0.0, | |
| "step": 2346 | |
| }, | |
| { | |
| "epoch": 4.869294605809129, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6641419536024938e-07, | |
| "loss": 0.0, | |
| "step": 2347 | |
| }, | |
| { | |
| "epoch": 4.87136929460581, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6117665614874268e-07, | |
| "loss": 0.0, | |
| "step": 2348 | |
| }, | |
| { | |
| "epoch": 4.87344398340249, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5602269399308977e-07, | |
| "loss": 0.0, | |
| "step": 2349 | |
| }, | |
| { | |
| "epoch": 4.875518672199171, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5095231970568613e-07, | |
| "loss": 0.0, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 4.877593360995851, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4596554392356077e-07, | |
| "loss": 0.0, | |
| "step": 2351 | |
| }, | |
| { | |
| "epoch": 4.8796680497925315, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.4106237710837634e-07, | |
| "loss": 0.0, | |
| "step": 2352 | |
| }, | |
| { | |
| "epoch": 4.881742738589212, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.362428295463847e-07, | |
| "loss": 0.0, | |
| "step": 2353 | |
| }, | |
| { | |
| "epoch": 4.8838174273858925, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.315069113484091e-07, | |
| "loss": 0.0, | |
| "step": 2354 | |
| }, | |
| { | |
| "epoch": 4.885892116182573, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2685463244984432e-07, | |
| "loss": 0.0, | |
| "step": 2355 | |
| }, | |
| { | |
| "epoch": 4.8879668049792535, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2228600261060762e-07, | |
| "loss": 0.0, | |
| "step": 2356 | |
| }, | |
| { | |
| "epoch": 4.890041493775934, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1780103141513454e-07, | |
| "loss": 0.0, | |
| "step": 2357 | |
| }, | |
| { | |
| "epoch": 4.8921161825726145, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.1339972827236089e-07, | |
| "loss": 0.0, | |
| "step": 2358 | |
| }, | |
| { | |
| "epoch": 4.894190871369295, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0908210241568295e-07, | |
| "loss": 0.0, | |
| "step": 2359 | |
| }, | |
| { | |
| "epoch": 4.8962655601659755, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.048481629029574e-07, | |
| "loss": 0.0, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 4.898340248962656, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0069791861649247e-07, | |
| "loss": 0.0, | |
| "step": 2361 | |
| }, | |
| { | |
| "epoch": 4.9004149377593365, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.663137826299019e-08, | |
| "loss": 0.0, | |
| "step": 2362 | |
| }, | |
| { | |
| "epoch": 4.902489626556017, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.264855037356856e-08, | |
| "loss": 0.0, | |
| "step": 2363 | |
| }, | |
| { | |
| "epoch": 4.904564315352697, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.874944330371282e-08, | |
| "loss": 0.0, | |
| "step": 2364 | |
| }, | |
| { | |
| "epoch": 4.906639004149378, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.493406523329307e-08, | |
| "loss": 0.0, | |
| "step": 2365 | |
| }, | |
| { | |
| "epoch": 4.908713692946058, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.120242416650214e-08, | |
| "loss": 0.0, | |
| "step": 2366 | |
| }, | |
| { | |
| "epoch": 4.910788381742739, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.75545279318779e-08, | |
| "loss": 0.0, | |
| "step": 2367 | |
| }, | |
| { | |
| "epoch": 4.912863070539419, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.399038418227201e-08, | |
| "loss": 0.0, | |
| "step": 2368 | |
| }, | |
| { | |
| "epoch": 4.9149377593361, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.051000039481892e-08, | |
| "loss": 0.0, | |
| "step": 2369 | |
| }, | |
| { | |
| "epoch": 4.91701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.711338387095367e-08, | |
| "loss": 0.0, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 4.919087136929461, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.380054173636741e-08, | |
| "loss": 0.0, | |
| "step": 2371 | |
| }, | |
| { | |
| "epoch": 4.921161825726141, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.057148094099852e-08, | |
| "loss": 0.0, | |
| "step": 2372 | |
| }, | |
| { | |
| "epoch": 4.923236514522822, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.742620825903267e-08, | |
| "loss": 0.0, | |
| "step": 2373 | |
| }, | |
| { | |
| "epoch": 4.925311203319502, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.4364730288876124e-08, | |
| "loss": 0.0, | |
| "step": 2374 | |
| }, | |
| { | |
| "epoch": 4.927385892116183, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.138705345313355e-08, | |
| "loss": 0.0, | |
| "step": 2375 | |
| }, | |
| { | |
| "epoch": 4.929460580912863, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.849318399861691e-08, | |
| "loss": 0.0, | |
| "step": 2376 | |
| }, | |
| { | |
| "epoch": 4.931535269709544, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.56831279963188e-08, | |
| "loss": 0.0, | |
| "step": 2377 | |
| }, | |
| { | |
| "epoch": 4.933609958506224, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.295689134139025e-08, | |
| "loss": 0.0, | |
| "step": 2378 | |
| }, | |
| { | |
| "epoch": 4.935684647302905, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.03144797531585e-08, | |
| "loss": 0.0, | |
| "step": 2379 | |
| }, | |
| { | |
| "epoch": 4.937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.7755898775073685e-08, | |
| "loss": 0.0, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 4.939834024896266, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.528115377473995e-08, | |
| "loss": 0.0, | |
| "step": 2381 | |
| }, | |
| { | |
| "epoch": 4.941908713692946, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.289024994387102e-08, | |
| "loss": 0.0, | |
| "step": 2382 | |
| }, | |
| { | |
| "epoch": 4.943983402489627, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.058319229829021e-08, | |
| "loss": 0.0, | |
| "step": 2383 | |
| }, | |
| { | |
| "epoch": 4.946058091286307, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.8359985677934852e-08, | |
| "loss": 0.0, | |
| "step": 2384 | |
| }, | |
| { | |
| "epoch": 4.948132780082988, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6220634746820793e-08, | |
| "loss": 0.0, | |
| "step": 2385 | |
| }, | |
| { | |
| "epoch": 4.950207468879668, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.4165143993051254e-08, | |
| "loss": 0.0, | |
| "step": 2386 | |
| }, | |
| { | |
| "epoch": 4.952282157676349, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.2193517728794634e-08, | |
| "loss": 0.0, | |
| "step": 2387 | |
| }, | |
| { | |
| "epoch": 4.954356846473029, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.030576009028451e-08, | |
| "loss": 0.0, | |
| "step": 2388 | |
| }, | |
| { | |
| "epoch": 4.95643153526971, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.85018750378152e-08, | |
| "loss": 0.0, | |
| "step": 2389 | |
| }, | |
| { | |
| "epoch": 4.95850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.6781866355719544e-08, | |
| "loss": 0.0, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 4.960580912863071, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5145737652364488e-08, | |
| "loss": 0.0, | |
| "step": 2391 | |
| }, | |
| { | |
| "epoch": 4.962655601659751, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.3593492360151062e-08, | |
| "loss": 0.0, | |
| "step": 2392 | |
| }, | |
| { | |
| "epoch": 4.964730290456432, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.2125133735505501e-08, | |
| "loss": 0.0, | |
| "step": 2393 | |
| }, | |
| { | |
| "epoch": 4.966804979253112, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.0740664858870375e-08, | |
| "loss": 0.0, | |
| "step": 2394 | |
| }, | |
| { | |
| "epoch": 4.968879668049793, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.440088634686817e-09, | |
| "loss": 0.0, | |
| "step": 2395 | |
| }, | |
| { | |
| "epoch": 4.970954356846473, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.223407791412285e-09, | |
| "loss": 0.0, | |
| "step": 2396 | |
| }, | |
| { | |
| "epoch": 4.973029045643154, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.090624881498364e-09, | |
| "loss": 0.0, | |
| "step": 2397 | |
| }, | |
| { | |
| "epoch": 4.975103734439834, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.0417422813863205e-09, | |
| "loss": 0.0, | |
| "step": 2398 | |
| }, | |
| { | |
| "epoch": 4.977178423236515, | |
| "grad_norm": NaN, | |
| "learning_rate": 5.07676219150266e-09, | |
| "loss": 0.0, | |
| "step": 2399 | |
| }, | |
| { | |
| "epoch": 4.979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.195686636259133e-09, | |
| "loss": 0.0, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 4.981327800829876, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.39851746404829e-09, | |
| "loss": 0.0, | |
| "step": 2401 | |
| }, | |
| { | |
| "epoch": 4.983402489626556, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.6852563472390402e-09, | |
| "loss": 0.0, | |
| "step": 2402 | |
| }, | |
| { | |
| "epoch": 4.985477178423237, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.0559047821588906e-09, | |
| "loss": 0.0, | |
| "step": 2403 | |
| }, | |
| { | |
| "epoch": 4.987551867219917, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.5104640891161482e-09, | |
| "loss": 0.0, | |
| "step": 2404 | |
| }, | |
| { | |
| "epoch": 4.9896265560165975, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.048935412382157e-09, | |
| "loss": 0.0, | |
| "step": 2405 | |
| }, | |
| { | |
| "epoch": 4.991701244813278, | |
| "grad_norm": NaN, | |
| "learning_rate": 6.713197201868582e-10, | |
| "loss": 0.0, | |
| "step": 2406 | |
| }, | |
| { | |
| "epoch": 4.9937759336099585, | |
| "grad_norm": NaN, | |
| "learning_rate": 3.776178047187884e-10, | |
| "loss": 0.0, | |
| "step": 2407 | |
| }, | |
| { | |
| "epoch": 4.995850622406639, | |
| "grad_norm": NaN, | |
| "learning_rate": 1.678302821339628e-10, | |
| "loss": 0.0, | |
| "step": 2408 | |
| }, | |
| { | |
| "epoch": 4.9979253112033195, | |
| "grad_norm": NaN, | |
| "learning_rate": 4.195759253811105e-11, | |
| "loss": 0.0, | |
| "step": 2409 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "grad_norm": NaN, | |
| "learning_rate": 0.0, | |
| "loss": 0.0, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "step": 2410, | |
| "total_flos": 1.3576202644094976e+17, | |
| "train_loss": 0.0, | |
| "train_runtime": 61187.0502, | |
| "train_samples_per_second": 20.161, | |
| "train_steps_per_second": 0.039 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 2410, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.3576202644094976e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |