| { | |
| "best_metric": 0.4351051665913261, | |
| "best_model_checkpoint": "/m/triton/scratch/elec/puhe/p/palp3/MUCS/indicwav2vec_outputs/pd_warmup_500/s300_shuff100/checkpoint-1000", | |
| "epoch": 1.6, | |
| "eval_steps": 1000, | |
| "global_step": 1000, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.0016, | |
| "grad_norm": null, | |
| "learning_rate": 0.0, | |
| "loss": 65.7309, | |
| "step": 1 | |
| }, | |
| { | |
| "epoch": 0.0032, | |
| "grad_norm": 19.431121826171875, | |
| "learning_rate": 1.2e-06, | |
| "loss": 40.497, | |
| "step": 2 | |
| }, | |
| { | |
| "epoch": 0.0048, | |
| "grad_norm": 20.21518898010254, | |
| "learning_rate": 2.4e-06, | |
| "loss": 36.2325, | |
| "step": 3 | |
| }, | |
| { | |
| "epoch": 0.0064, | |
| "grad_norm": 11.604636192321777, | |
| "learning_rate": 3.6e-06, | |
| "loss": 27.9292, | |
| "step": 4 | |
| }, | |
| { | |
| "epoch": 0.008, | |
| "grad_norm": null, | |
| "learning_rate": 3.6e-06, | |
| "loss": 27.842, | |
| "step": 5 | |
| }, | |
| { | |
| "epoch": 0.0096, | |
| "grad_norm": 13.326777458190918, | |
| "learning_rate": 4.8e-06, | |
| "loss": 28.3298, | |
| "step": 6 | |
| }, | |
| { | |
| "epoch": 0.0112, | |
| "grad_norm": 12.479119300842285, | |
| "learning_rate": 5.999999999999999e-06, | |
| "loss": 28.7461, | |
| "step": 7 | |
| }, | |
| { | |
| "epoch": 0.0128, | |
| "grad_norm": 11.469672203063965, | |
| "learning_rate": 7.2e-06, | |
| "loss": 26.4075, | |
| "step": 8 | |
| }, | |
| { | |
| "epoch": 0.0144, | |
| "grad_norm": 10.416142463684082, | |
| "learning_rate": 8.4e-06, | |
| "loss": 23.0338, | |
| "step": 9 | |
| }, | |
| { | |
| "epoch": 0.016, | |
| "grad_norm": 10.866898536682129, | |
| "learning_rate": 9.6e-06, | |
| "loss": 25.844, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.0176, | |
| "grad_norm": 11.97667121887207, | |
| "learning_rate": 1.0799999999999998e-05, | |
| "loss": 24.9471, | |
| "step": 11 | |
| }, | |
| { | |
| "epoch": 0.0192, | |
| "grad_norm": 10.51934814453125, | |
| "learning_rate": 1.1999999999999999e-05, | |
| "loss": 24.6028, | |
| "step": 12 | |
| }, | |
| { | |
| "epoch": 0.0208, | |
| "grad_norm": 10.643861770629883, | |
| "learning_rate": 1.3199999999999997e-05, | |
| "loss": 23.1144, | |
| "step": 13 | |
| }, | |
| { | |
| "epoch": 0.0224, | |
| "grad_norm": 11.642038345336914, | |
| "learning_rate": 1.44e-05, | |
| "loss": 23.0041, | |
| "step": 14 | |
| }, | |
| { | |
| "epoch": 0.024, | |
| "grad_norm": 10.239847183227539, | |
| "learning_rate": 1.5599999999999996e-05, | |
| "loss": 22.1151, | |
| "step": 15 | |
| }, | |
| { | |
| "epoch": 0.0256, | |
| "grad_norm": 10.31641960144043, | |
| "learning_rate": 1.68e-05, | |
| "loss": 20.955, | |
| "step": 16 | |
| }, | |
| { | |
| "epoch": 0.0272, | |
| "grad_norm": 10.14737606048584, | |
| "learning_rate": 1.7999999999999997e-05, | |
| "loss": 20.0026, | |
| "step": 17 | |
| }, | |
| { | |
| "epoch": 0.0288, | |
| "grad_norm": 10.14389419555664, | |
| "learning_rate": 1.92e-05, | |
| "loss": 20.053, | |
| "step": 18 | |
| }, | |
| { | |
| "epoch": 0.0304, | |
| "grad_norm": 13.049298286437988, | |
| "learning_rate": 2.04e-05, | |
| "loss": 23.2289, | |
| "step": 19 | |
| }, | |
| { | |
| "epoch": 0.032, | |
| "grad_norm": 10.918747901916504, | |
| "learning_rate": 2.1599999999999996e-05, | |
| "loss": 20.6362, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.0336, | |
| "grad_norm": 10.782907485961914, | |
| "learning_rate": 2.28e-05, | |
| "loss": 19.6928, | |
| "step": 21 | |
| }, | |
| { | |
| "epoch": 0.0352, | |
| "grad_norm": 13.15986156463623, | |
| "learning_rate": 2.3999999999999997e-05, | |
| "loss": 22.1249, | |
| "step": 22 | |
| }, | |
| { | |
| "epoch": 0.0368, | |
| "grad_norm": 11.684220314025879, | |
| "learning_rate": 2.52e-05, | |
| "loss": 20.4745, | |
| "step": 23 | |
| }, | |
| { | |
| "epoch": 0.0384, | |
| "grad_norm": 11.818272590637207, | |
| "learning_rate": 2.6399999999999995e-05, | |
| "loss": 20.3504, | |
| "step": 24 | |
| }, | |
| { | |
| "epoch": 0.04, | |
| "grad_norm": 12.81021499633789, | |
| "learning_rate": 2.7599999999999997e-05, | |
| "loss": 21.0093, | |
| "step": 25 | |
| }, | |
| { | |
| "epoch": 0.0416, | |
| "grad_norm": 14.015459060668945, | |
| "learning_rate": 2.88e-05, | |
| "loss": 22.1517, | |
| "step": 26 | |
| }, | |
| { | |
| "epoch": 0.0432, | |
| "grad_norm": 11.829425811767578, | |
| "learning_rate": 2.9999999999999997e-05, | |
| "loss": 18.2946, | |
| "step": 27 | |
| }, | |
| { | |
| "epoch": 0.0448, | |
| "grad_norm": 15.79493236541748, | |
| "learning_rate": 3.119999999999999e-05, | |
| "loss": 22.9019, | |
| "step": 28 | |
| }, | |
| { | |
| "epoch": 0.0464, | |
| "grad_norm": 14.349289894104004, | |
| "learning_rate": 3.2399999999999995e-05, | |
| "loss": 20.221, | |
| "step": 29 | |
| }, | |
| { | |
| "epoch": 0.048, | |
| "grad_norm": 14.111164093017578, | |
| "learning_rate": 3.36e-05, | |
| "loss": 18.9742, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.0496, | |
| "grad_norm": 15.603620529174805, | |
| "learning_rate": 3.48e-05, | |
| "loss": 20.2483, | |
| "step": 31 | |
| }, | |
| { | |
| "epoch": 0.0512, | |
| "grad_norm": 16.44049072265625, | |
| "learning_rate": 3.5999999999999994e-05, | |
| "loss": 19.9449, | |
| "step": 32 | |
| }, | |
| { | |
| "epoch": 0.0528, | |
| "grad_norm": 18.68276596069336, | |
| "learning_rate": 3.7199999999999996e-05, | |
| "loss": 21.1125, | |
| "step": 33 | |
| }, | |
| { | |
| "epoch": 0.0544, | |
| "grad_norm": 14.831990242004395, | |
| "learning_rate": 3.84e-05, | |
| "loss": 17.0528, | |
| "step": 34 | |
| }, | |
| { | |
| "epoch": 0.056, | |
| "grad_norm": 16.635009765625, | |
| "learning_rate": 3.96e-05, | |
| "loss": 18.3324, | |
| "step": 35 | |
| }, | |
| { | |
| "epoch": 0.0576, | |
| "grad_norm": 18.090103149414062, | |
| "learning_rate": 4.08e-05, | |
| "loss": 18.495, | |
| "step": 36 | |
| }, | |
| { | |
| "epoch": 0.0592, | |
| "grad_norm": 19.209562301635742, | |
| "learning_rate": 4.2e-05, | |
| "loss": 18.7353, | |
| "step": 37 | |
| }, | |
| { | |
| "epoch": 0.0608, | |
| "grad_norm": 19.63134765625, | |
| "learning_rate": 4.319999999999999e-05, | |
| "loss": 17.8893, | |
| "step": 38 | |
| }, | |
| { | |
| "epoch": 0.0624, | |
| "grad_norm": 20.713550567626953, | |
| "learning_rate": 4.4399999999999995e-05, | |
| "loss": 17.9679, | |
| "step": 39 | |
| }, | |
| { | |
| "epoch": 0.064, | |
| "grad_norm": 25.246625900268555, | |
| "learning_rate": 4.56e-05, | |
| "loss": 19.6261, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.0656, | |
| "grad_norm": 28.108665466308594, | |
| "learning_rate": 4.68e-05, | |
| "loss": 20.795, | |
| "step": 41 | |
| }, | |
| { | |
| "epoch": 0.0672, | |
| "grad_norm": 26.678646087646484, | |
| "learning_rate": 4.7999999999999994e-05, | |
| "loss": 19.3122, | |
| "step": 42 | |
| }, | |
| { | |
| "epoch": 0.0688, | |
| "grad_norm": 28.03876304626465, | |
| "learning_rate": 4.9199999999999997e-05, | |
| "loss": 19.0949, | |
| "step": 43 | |
| }, | |
| { | |
| "epoch": 0.0704, | |
| "grad_norm": 28.40298080444336, | |
| "learning_rate": 5.04e-05, | |
| "loss": 18.7253, | |
| "step": 44 | |
| }, | |
| { | |
| "epoch": 0.072, | |
| "grad_norm": 30.833574295043945, | |
| "learning_rate": 5.1599999999999994e-05, | |
| "loss": 18.9767, | |
| "step": 45 | |
| }, | |
| { | |
| "epoch": 0.0736, | |
| "grad_norm": 25.621904373168945, | |
| "learning_rate": 5.279999999999999e-05, | |
| "loss": 16.0377, | |
| "step": 46 | |
| }, | |
| { | |
| "epoch": 0.0752, | |
| "grad_norm": 26.582237243652344, | |
| "learning_rate": 5.399999999999999e-05, | |
| "loss": 15.6131, | |
| "step": 47 | |
| }, | |
| { | |
| "epoch": 0.0768, | |
| "grad_norm": 34.89401626586914, | |
| "learning_rate": 5.519999999999999e-05, | |
| "loss": 17.8755, | |
| "step": 48 | |
| }, | |
| { | |
| "epoch": 0.0784, | |
| "grad_norm": 32.71629333496094, | |
| "learning_rate": 5.6399999999999995e-05, | |
| "loss": 16.0825, | |
| "step": 49 | |
| }, | |
| { | |
| "epoch": 0.08, | |
| "grad_norm": 27.743824005126953, | |
| "learning_rate": 5.76e-05, | |
| "loss": 13.6992, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.0816, | |
| "grad_norm": null, | |
| "learning_rate": 5.76e-05, | |
| "loss": 30.6838, | |
| "step": 51 | |
| }, | |
| { | |
| "epoch": 0.0832, | |
| "grad_norm": 85.50408935546875, | |
| "learning_rate": 5.88e-05, | |
| "loss": 26.4379, | |
| "step": 52 | |
| }, | |
| { | |
| "epoch": 0.0848, | |
| "grad_norm": null, | |
| "learning_rate": 5.88e-05, | |
| "loss": 27.0726, | |
| "step": 53 | |
| }, | |
| { | |
| "epoch": 0.0864, | |
| "grad_norm": 50.16768264770508, | |
| "learning_rate": 5.9999999999999995e-05, | |
| "loss": 18.3707, | |
| "step": 54 | |
| }, | |
| { | |
| "epoch": 0.088, | |
| "grad_norm": 131.34494018554688, | |
| "learning_rate": 6.12e-05, | |
| "loss": 23.1862, | |
| "step": 55 | |
| }, | |
| { | |
| "epoch": 0.0896, | |
| "grad_norm": 44.524417877197266, | |
| "learning_rate": 6.239999999999999e-05, | |
| "loss": 14.7586, | |
| "step": 56 | |
| }, | |
| { | |
| "epoch": 0.0912, | |
| "grad_norm": 71.3255615234375, | |
| "learning_rate": 6.359999999999999e-05, | |
| "loss": 19.4668, | |
| "step": 57 | |
| }, | |
| { | |
| "epoch": 0.0928, | |
| "grad_norm": 43.701072692871094, | |
| "learning_rate": 6.479999999999999e-05, | |
| "loss": 13.1064, | |
| "step": 58 | |
| }, | |
| { | |
| "epoch": 0.0944, | |
| "grad_norm": 45.97714614868164, | |
| "learning_rate": 6.599999999999999e-05, | |
| "loss": 12.3225, | |
| "step": 59 | |
| }, | |
| { | |
| "epoch": 0.096, | |
| "grad_norm": 49.00947952270508, | |
| "learning_rate": 6.72e-05, | |
| "loss": 12.3515, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.0976, | |
| "grad_norm": 54.81338119506836, | |
| "learning_rate": 6.84e-05, | |
| "loss": 12.8657, | |
| "step": 61 | |
| }, | |
| { | |
| "epoch": 0.0992, | |
| "grad_norm": 44.2459831237793, | |
| "learning_rate": 6.96e-05, | |
| "loss": 10.2856, | |
| "step": 62 | |
| }, | |
| { | |
| "epoch": 0.1008, | |
| "grad_norm": 73.28941345214844, | |
| "learning_rate": 7.079999999999999e-05, | |
| "loss": 9.7418, | |
| "step": 63 | |
| }, | |
| { | |
| "epoch": 0.1024, | |
| "grad_norm": 40.017478942871094, | |
| "learning_rate": 7.199999999999999e-05, | |
| "loss": 8.9116, | |
| "step": 64 | |
| }, | |
| { | |
| "epoch": 0.104, | |
| "grad_norm": 35.54833221435547, | |
| "learning_rate": 7.319999999999999e-05, | |
| "loss": 7.9677, | |
| "step": 65 | |
| }, | |
| { | |
| "epoch": 0.1056, | |
| "grad_norm": 42.94904708862305, | |
| "learning_rate": 7.439999999999999e-05, | |
| "loss": 8.5134, | |
| "step": 66 | |
| }, | |
| { | |
| "epoch": 0.1072, | |
| "grad_norm": 38.947715759277344, | |
| "learning_rate": 7.56e-05, | |
| "loss": 7.7674, | |
| "step": 67 | |
| }, | |
| { | |
| "epoch": 0.1088, | |
| "grad_norm": 35.40559768676758, | |
| "learning_rate": 7.68e-05, | |
| "loss": 7.2294, | |
| "step": 68 | |
| }, | |
| { | |
| "epoch": 0.1104, | |
| "grad_norm": 31.678964614868164, | |
| "learning_rate": 7.8e-05, | |
| "loss": 6.7146, | |
| "step": 69 | |
| }, | |
| { | |
| "epoch": 0.112, | |
| "grad_norm": 31.80811309814453, | |
| "learning_rate": 7.92e-05, | |
| "loss": 6.5397, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.1136, | |
| "grad_norm": 25.740388870239258, | |
| "learning_rate": 8.04e-05, | |
| "loss": 6.0079, | |
| "step": 71 | |
| }, | |
| { | |
| "epoch": 0.1152, | |
| "grad_norm": 23.663726806640625, | |
| "learning_rate": 8.16e-05, | |
| "loss": 5.7781, | |
| "step": 72 | |
| }, | |
| { | |
| "epoch": 0.1168, | |
| "grad_norm": 20.862640380859375, | |
| "learning_rate": 8.28e-05, | |
| "loss": 5.5184, | |
| "step": 73 | |
| }, | |
| { | |
| "epoch": 0.1184, | |
| "grad_norm": 17.784168243408203, | |
| "learning_rate": 8.4e-05, | |
| "loss": 5.3262, | |
| "step": 74 | |
| }, | |
| { | |
| "epoch": 0.12, | |
| "grad_norm": 16.173917770385742, | |
| "learning_rate": 8.519999999999998e-05, | |
| "loss": 5.2462, | |
| "step": 75 | |
| }, | |
| { | |
| "epoch": 0.1216, | |
| "grad_norm": 11.109197616577148, | |
| "learning_rate": 8.639999999999999e-05, | |
| "loss": 4.989, | |
| "step": 76 | |
| }, | |
| { | |
| "epoch": 0.1232, | |
| "grad_norm": 9.369626998901367, | |
| "learning_rate": 8.759999999999999e-05, | |
| "loss": 4.9122, | |
| "step": 77 | |
| }, | |
| { | |
| "epoch": 0.1248, | |
| "grad_norm": 9.535905838012695, | |
| "learning_rate": 8.879999999999999e-05, | |
| "loss": 4.9956, | |
| "step": 78 | |
| }, | |
| { | |
| "epoch": 0.1264, | |
| "grad_norm": 3.752645969390869, | |
| "learning_rate": 8.999999999999999e-05, | |
| "loss": 4.7227, | |
| "step": 79 | |
| }, | |
| { | |
| "epoch": 0.128, | |
| "grad_norm": 4.03261137008667, | |
| "learning_rate": 9.12e-05, | |
| "loss": 4.7404, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.1296, | |
| "grad_norm": 3.3785626888275146, | |
| "learning_rate": 9.24e-05, | |
| "loss": 4.6154, | |
| "step": 81 | |
| }, | |
| { | |
| "epoch": 0.1312, | |
| "grad_norm": 3.4677908420562744, | |
| "learning_rate": 9.36e-05, | |
| "loss": 4.6707, | |
| "step": 82 | |
| }, | |
| { | |
| "epoch": 0.1328, | |
| "grad_norm": 4.760728359222412, | |
| "learning_rate": 9.479999999999999e-05, | |
| "loss": 4.5489, | |
| "step": 83 | |
| }, | |
| { | |
| "epoch": 0.1344, | |
| "grad_norm": 2.995011329650879, | |
| "learning_rate": 9.599999999999999e-05, | |
| "loss": 4.5604, | |
| "step": 84 | |
| }, | |
| { | |
| "epoch": 0.136, | |
| "grad_norm": 3.4023890495300293, | |
| "learning_rate": 9.719999999999999e-05, | |
| "loss": 4.4197, | |
| "step": 85 | |
| }, | |
| { | |
| "epoch": 0.1376, | |
| "grad_norm": 3.294135570526123, | |
| "learning_rate": 9.839999999999999e-05, | |
| "loss": 4.3887, | |
| "step": 86 | |
| }, | |
| { | |
| "epoch": 0.1392, | |
| "grad_norm": 2.753955841064453, | |
| "learning_rate": 9.96e-05, | |
| "loss": 4.3751, | |
| "step": 87 | |
| }, | |
| { | |
| "epoch": 0.1408, | |
| "grad_norm": 3.3620405197143555, | |
| "learning_rate": 0.0001008, | |
| "loss": 4.2838, | |
| "step": 88 | |
| }, | |
| { | |
| "epoch": 0.1424, | |
| "grad_norm": 2.385225534439087, | |
| "learning_rate": 0.000102, | |
| "loss": 4.183, | |
| "step": 89 | |
| }, | |
| { | |
| "epoch": 0.144, | |
| "grad_norm": 2.7207183837890625, | |
| "learning_rate": 0.00010319999999999999, | |
| "loss": 4.1382, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.1456, | |
| "grad_norm": 2.0092227458953857, | |
| "learning_rate": 0.00010439999999999999, | |
| "loss": 4.1519, | |
| "step": 91 | |
| }, | |
| { | |
| "epoch": 0.1472, | |
| "grad_norm": 1.8385318517684937, | |
| "learning_rate": 0.00010559999999999998, | |
| "loss": 4.136, | |
| "step": 92 | |
| }, | |
| { | |
| "epoch": 0.1488, | |
| "grad_norm": 1.3982303142547607, | |
| "learning_rate": 0.00010679999999999998, | |
| "loss": 4.084, | |
| "step": 93 | |
| }, | |
| { | |
| "epoch": 0.1504, | |
| "grad_norm": 1.4304462671279907, | |
| "learning_rate": 0.00010799999999999998, | |
| "loss": 4.0993, | |
| "step": 94 | |
| }, | |
| { | |
| "epoch": 0.152, | |
| "grad_norm": 5.04845666885376, | |
| "learning_rate": 0.00010919999999999998, | |
| "loss": 4.1147, | |
| "step": 95 | |
| }, | |
| { | |
| "epoch": 0.1536, | |
| "grad_norm": 1.1188616752624512, | |
| "learning_rate": 0.00011039999999999999, | |
| "loss": 4.0138, | |
| "step": 96 | |
| }, | |
| { | |
| "epoch": 0.1552, | |
| "grad_norm": 2.9660868644714355, | |
| "learning_rate": 0.00011159999999999999, | |
| "loss": 4.0886, | |
| "step": 97 | |
| }, | |
| { | |
| "epoch": 0.1568, | |
| "grad_norm": 2.909925699234009, | |
| "learning_rate": 0.00011279999999999999, | |
| "loss": 4.0743, | |
| "step": 98 | |
| }, | |
| { | |
| "epoch": 0.1584, | |
| "grad_norm": 2.233374834060669, | |
| "learning_rate": 0.00011399999999999999, | |
| "loss": 4.0643, | |
| "step": 99 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "grad_norm": 1.9833197593688965, | |
| "learning_rate": 0.0001152, | |
| "loss": 4.1569, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.1616, | |
| "grad_norm": 31.450782775878906, | |
| "learning_rate": 0.0001164, | |
| "loss": 4.756, | |
| "step": 101 | |
| }, | |
| { | |
| "epoch": 0.1632, | |
| "grad_norm": 11.571601867675781, | |
| "learning_rate": 0.0001176, | |
| "loss": 4.0323, | |
| "step": 102 | |
| }, | |
| { | |
| "epoch": 0.1648, | |
| "grad_norm": 9.144487380981445, | |
| "learning_rate": 0.0001188, | |
| "loss": 3.9735, | |
| "step": 103 | |
| }, | |
| { | |
| "epoch": 0.1664, | |
| "grad_norm": 16.850412368774414, | |
| "learning_rate": 0.00011999999999999999, | |
| "loss": 4.218, | |
| "step": 104 | |
| }, | |
| { | |
| "epoch": 0.168, | |
| "grad_norm": 2.83750319480896, | |
| "learning_rate": 0.00012119999999999999, | |
| "loss": 3.9086, | |
| "step": 105 | |
| }, | |
| { | |
| "epoch": 0.1696, | |
| "grad_norm": 1.1910499334335327, | |
| "learning_rate": 0.0001224, | |
| "loss": 3.8875, | |
| "step": 106 | |
| }, | |
| { | |
| "epoch": 0.1712, | |
| "grad_norm": 1.7703046798706055, | |
| "learning_rate": 0.0001236, | |
| "loss": 3.9357, | |
| "step": 107 | |
| }, | |
| { | |
| "epoch": 0.1728, | |
| "grad_norm": 4.045347690582275, | |
| "learning_rate": 0.00012479999999999997, | |
| "loss": 3.8306, | |
| "step": 108 | |
| }, | |
| { | |
| "epoch": 0.1744, | |
| "grad_norm": 3.414834499359131, | |
| "learning_rate": 0.00012599999999999997, | |
| "loss": 3.8273, | |
| "step": 109 | |
| }, | |
| { | |
| "epoch": 0.176, | |
| "grad_norm": 5.279017925262451, | |
| "learning_rate": 0.00012719999999999997, | |
| "loss": 3.8331, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.1776, | |
| "grad_norm": 2.1371779441833496, | |
| "learning_rate": 0.00012839999999999998, | |
| "loss": 3.8236, | |
| "step": 111 | |
| }, | |
| { | |
| "epoch": 0.1792, | |
| "grad_norm": 0.6024636030197144, | |
| "learning_rate": 0.00012959999999999998, | |
| "loss": 3.7802, | |
| "step": 112 | |
| }, | |
| { | |
| "epoch": 0.1808, | |
| "grad_norm": 3.284954786300659, | |
| "learning_rate": 0.00013079999999999998, | |
| "loss": 3.837, | |
| "step": 113 | |
| }, | |
| { | |
| "epoch": 0.1824, | |
| "grad_norm": 3.2697064876556396, | |
| "learning_rate": 0.00013199999999999998, | |
| "loss": 3.8099, | |
| "step": 114 | |
| }, | |
| { | |
| "epoch": 0.184, | |
| "grad_norm": 2.267110824584961, | |
| "learning_rate": 0.00013319999999999999, | |
| "loss": 3.7916, | |
| "step": 115 | |
| }, | |
| { | |
| "epoch": 0.1856, | |
| "grad_norm": 0.9686329364776611, | |
| "learning_rate": 0.0001344, | |
| "loss": 3.7612, | |
| "step": 116 | |
| }, | |
| { | |
| "epoch": 0.1872, | |
| "grad_norm": 1.1181795597076416, | |
| "learning_rate": 0.0001356, | |
| "loss": 3.817, | |
| "step": 117 | |
| }, | |
| { | |
| "epoch": 0.1888, | |
| "grad_norm": 4.291112899780273, | |
| "learning_rate": 0.0001368, | |
| "loss": 3.75, | |
| "step": 118 | |
| }, | |
| { | |
| "epoch": 0.1904, | |
| "grad_norm": 1.1991240978240967, | |
| "learning_rate": 0.000138, | |
| "loss": 3.8755, | |
| "step": 119 | |
| }, | |
| { | |
| "epoch": 0.192, | |
| "grad_norm": 1.800611138343811, | |
| "learning_rate": 0.0001392, | |
| "loss": 3.7636, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.1936, | |
| "grad_norm": 0.6300956606864929, | |
| "learning_rate": 0.0001404, | |
| "loss": 3.8781, | |
| "step": 121 | |
| }, | |
| { | |
| "epoch": 0.1952, | |
| "grad_norm": 1.9492263793945312, | |
| "learning_rate": 0.00014159999999999997, | |
| "loss": 3.758, | |
| "step": 122 | |
| }, | |
| { | |
| "epoch": 0.1968, | |
| "grad_norm": 1.5942705869674683, | |
| "learning_rate": 0.00014279999999999997, | |
| "loss": 3.7344, | |
| "step": 123 | |
| }, | |
| { | |
| "epoch": 0.1984, | |
| "grad_norm": 1.0828086137771606, | |
| "learning_rate": 0.00014399999999999998, | |
| "loss": 3.7535, | |
| "step": 124 | |
| }, | |
| { | |
| "epoch": 0.2, | |
| "grad_norm": 1.54654860496521, | |
| "learning_rate": 0.00014519999999999998, | |
| "loss": 3.7764, | |
| "step": 125 | |
| }, | |
| { | |
| "epoch": 0.2016, | |
| "grad_norm": 3.488430976867676, | |
| "learning_rate": 0.00014639999999999998, | |
| "loss": 3.8125, | |
| "step": 126 | |
| }, | |
| { | |
| "epoch": 0.2032, | |
| "grad_norm": 1.4746885299682617, | |
| "learning_rate": 0.00014759999999999998, | |
| "loss": 3.8042, | |
| "step": 127 | |
| }, | |
| { | |
| "epoch": 0.2048, | |
| "grad_norm": 2.114961862564087, | |
| "learning_rate": 0.00014879999999999998, | |
| "loss": 3.7654, | |
| "step": 128 | |
| }, | |
| { | |
| "epoch": 0.2064, | |
| "grad_norm": 1.086930751800537, | |
| "learning_rate": 0.00015, | |
| "loss": 3.797, | |
| "step": 129 | |
| }, | |
| { | |
| "epoch": 0.208, | |
| "grad_norm": 0.5767809152603149, | |
| "learning_rate": 0.0001512, | |
| "loss": 3.7507, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.2096, | |
| "grad_norm": 2.0122199058532715, | |
| "learning_rate": 0.0001524, | |
| "loss": 3.7508, | |
| "step": 131 | |
| }, | |
| { | |
| "epoch": 0.2112, | |
| "grad_norm": 3.8814480304718018, | |
| "learning_rate": 0.0001536, | |
| "loss": 3.8003, | |
| "step": 132 | |
| }, | |
| { | |
| "epoch": 0.2128, | |
| "grad_norm": 1.92637038230896, | |
| "learning_rate": 0.0001548, | |
| "loss": 3.8009, | |
| "step": 133 | |
| }, | |
| { | |
| "epoch": 0.2144, | |
| "grad_norm": 0.7897951006889343, | |
| "learning_rate": 0.000156, | |
| "loss": 3.7989, | |
| "step": 134 | |
| }, | |
| { | |
| "epoch": 0.216, | |
| "grad_norm": 4.2987751960754395, | |
| "learning_rate": 0.0001572, | |
| "loss": 3.7605, | |
| "step": 135 | |
| }, | |
| { | |
| "epoch": 0.2176, | |
| "grad_norm": 1.7444305419921875, | |
| "learning_rate": 0.0001584, | |
| "loss": 3.8734, | |
| "step": 136 | |
| }, | |
| { | |
| "epoch": 0.2192, | |
| "grad_norm": 3.6664788722991943, | |
| "learning_rate": 0.0001596, | |
| "loss": 3.7828, | |
| "step": 137 | |
| }, | |
| { | |
| "epoch": 0.2208, | |
| "grad_norm": 1.8154056072235107, | |
| "learning_rate": 0.0001608, | |
| "loss": 3.9124, | |
| "step": 138 | |
| }, | |
| { | |
| "epoch": 0.2224, | |
| "grad_norm": 0.8211868405342102, | |
| "learning_rate": 0.000162, | |
| "loss": 3.7861, | |
| "step": 139 | |
| }, | |
| { | |
| "epoch": 0.224, | |
| "grad_norm": 1.5533865690231323, | |
| "learning_rate": 0.0001632, | |
| "loss": 3.787, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.2256, | |
| "grad_norm": 2.957888603210449, | |
| "learning_rate": 0.0001644, | |
| "loss": 3.7569, | |
| "step": 141 | |
| }, | |
| { | |
| "epoch": 0.2272, | |
| "grad_norm": 3.3956074714660645, | |
| "learning_rate": 0.0001656, | |
| "loss": 3.854, | |
| "step": 142 | |
| }, | |
| { | |
| "epoch": 0.2288, | |
| "grad_norm": 4.3899993896484375, | |
| "learning_rate": 0.0001668, | |
| "loss": 3.8498, | |
| "step": 143 | |
| }, | |
| { | |
| "epoch": 0.2304, | |
| "grad_norm": 2.408184051513672, | |
| "learning_rate": 0.000168, | |
| "loss": 3.7483, | |
| "step": 144 | |
| }, | |
| { | |
| "epoch": 0.232, | |
| "grad_norm": 2.7876155376434326, | |
| "learning_rate": 0.00016919999999999997, | |
| "loss": 3.7817, | |
| "step": 145 | |
| }, | |
| { | |
| "epoch": 0.2336, | |
| "grad_norm": 2.2329955101013184, | |
| "learning_rate": 0.00017039999999999997, | |
| "loss": 3.7676, | |
| "step": 146 | |
| }, | |
| { | |
| "epoch": 0.2352, | |
| "grad_norm": 1.5696635246276855, | |
| "learning_rate": 0.00017159999999999997, | |
| "loss": 3.811, | |
| "step": 147 | |
| }, | |
| { | |
| "epoch": 0.2368, | |
| "grad_norm": 2.209275007247925, | |
| "learning_rate": 0.00017279999999999997, | |
| "loss": 3.8107, | |
| "step": 148 | |
| }, | |
| { | |
| "epoch": 0.2384, | |
| "grad_norm": 1.1010030508041382, | |
| "learning_rate": 0.00017399999999999997, | |
| "loss": 3.8716, | |
| "step": 149 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "grad_norm": 3.2085022926330566, | |
| "learning_rate": 0.00017519999999999998, | |
| "loss": 3.9153, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.2416, | |
| "grad_norm": 27.279512405395508, | |
| "learning_rate": 0.00017639999999999998, | |
| "loss": 4.6372, | |
| "step": 151 | |
| }, | |
| { | |
| "epoch": 0.2432, | |
| "grad_norm": 6.734384536743164, | |
| "learning_rate": 0.00017759999999999998, | |
| "loss": 3.8794, | |
| "step": 152 | |
| }, | |
| { | |
| "epoch": 0.2448, | |
| "grad_norm": 8.395252227783203, | |
| "learning_rate": 0.00017879999999999998, | |
| "loss": 3.9047, | |
| "step": 153 | |
| }, | |
| { | |
| "epoch": 0.2464, | |
| "grad_norm": 2.386127471923828, | |
| "learning_rate": 0.00017999999999999998, | |
| "loss": 3.8455, | |
| "step": 154 | |
| }, | |
| { | |
| "epoch": 0.248, | |
| "grad_norm": 5.339539051055908, | |
| "learning_rate": 0.00018119999999999999, | |
| "loss": 4.004, | |
| "step": 155 | |
| }, | |
| { | |
| "epoch": 0.2496, | |
| "grad_norm": 6.078152179718018, | |
| "learning_rate": 0.0001824, | |
| "loss": 3.8299, | |
| "step": 156 | |
| }, | |
| { | |
| "epoch": 0.2512, | |
| "grad_norm": 6.789322853088379, | |
| "learning_rate": 0.0001836, | |
| "loss": 3.863, | |
| "step": 157 | |
| }, | |
| { | |
| "epoch": 0.2528, | |
| "grad_norm": 6.238964557647705, | |
| "learning_rate": 0.0001848, | |
| "loss": 3.8967, | |
| "step": 158 | |
| }, | |
| { | |
| "epoch": 0.2544, | |
| "grad_norm": 4.713714122772217, | |
| "learning_rate": 0.000186, | |
| "loss": 3.8696, | |
| "step": 159 | |
| }, | |
| { | |
| "epoch": 0.256, | |
| "grad_norm": 2.388154983520508, | |
| "learning_rate": 0.0001872, | |
| "loss": 3.752, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.2576, | |
| "grad_norm": 3.3313629627227783, | |
| "learning_rate": 0.00018839999999999997, | |
| "loss": 3.7579, | |
| "step": 161 | |
| }, | |
| { | |
| "epoch": 0.2592, | |
| "grad_norm": 5.692492485046387, | |
| "learning_rate": 0.00018959999999999997, | |
| "loss": 3.7794, | |
| "step": 162 | |
| }, | |
| { | |
| "epoch": 0.2608, | |
| "grad_norm": 5.304704189300537, | |
| "learning_rate": 0.00019079999999999998, | |
| "loss": 3.8021, | |
| "step": 163 | |
| }, | |
| { | |
| "epoch": 0.2624, | |
| "grad_norm": 5.1804118156433105, | |
| "learning_rate": 0.00019199999999999998, | |
| "loss": 3.7633, | |
| "step": 164 | |
| }, | |
| { | |
| "epoch": 0.264, | |
| "grad_norm": 3.4341394901275635, | |
| "learning_rate": 0.00019319999999999998, | |
| "loss": 3.802, | |
| "step": 165 | |
| }, | |
| { | |
| "epoch": 0.2656, | |
| "grad_norm": 0.7816358804702759, | |
| "learning_rate": 0.00019439999999999998, | |
| "loss": 3.7327, | |
| "step": 166 | |
| }, | |
| { | |
| "epoch": 0.2672, | |
| "grad_norm": 3.8968913555145264, | |
| "learning_rate": 0.00019559999999999998, | |
| "loss": 3.7701, | |
| "step": 167 | |
| }, | |
| { | |
| "epoch": 0.2688, | |
| "grad_norm": 6.041684150695801, | |
| "learning_rate": 0.00019679999999999999, | |
| "loss": 3.7651, | |
| "step": 168 | |
| }, | |
| { | |
| "epoch": 0.2704, | |
| "grad_norm": 1.5194474458694458, | |
| "learning_rate": 0.000198, | |
| "loss": 3.9153, | |
| "step": 169 | |
| }, | |
| { | |
| "epoch": 0.272, | |
| "grad_norm": 5.066249847412109, | |
| "learning_rate": 0.0001992, | |
| "loss": 3.7799, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.2736, | |
| "grad_norm": 2.518998146057129, | |
| "learning_rate": 0.0002004, | |
| "loss": 3.7326, | |
| "step": 171 | |
| }, | |
| { | |
| "epoch": 0.2752, | |
| "grad_norm": 0.759394645690918, | |
| "learning_rate": 0.0002016, | |
| "loss": 3.7889, | |
| "step": 172 | |
| }, | |
| { | |
| "epoch": 0.2768, | |
| "grad_norm": 3.221085786819458, | |
| "learning_rate": 0.0002028, | |
| "loss": 3.709, | |
| "step": 173 | |
| }, | |
| { | |
| "epoch": 0.2784, | |
| "grad_norm": 5.799932956695557, | |
| "learning_rate": 0.000204, | |
| "loss": 3.8104, | |
| "step": 174 | |
| }, | |
| { | |
| "epoch": 0.28, | |
| "grad_norm": 4.494676113128662, | |
| "learning_rate": 0.0002052, | |
| "loss": 3.7602, | |
| "step": 175 | |
| }, | |
| { | |
| "epoch": 0.2816, | |
| "grad_norm": 1.4825067520141602, | |
| "learning_rate": 0.00020639999999999998, | |
| "loss": 3.6961, | |
| "step": 176 | |
| }, | |
| { | |
| "epoch": 0.2832, | |
| "grad_norm": 1.2989227771759033, | |
| "learning_rate": 0.00020759999999999998, | |
| "loss": 3.7053, | |
| "step": 177 | |
| }, | |
| { | |
| "epoch": 0.2848, | |
| "grad_norm": 2.4774816036224365, | |
| "learning_rate": 0.00020879999999999998, | |
| "loss": 3.7072, | |
| "step": 178 | |
| }, | |
| { | |
| "epoch": 0.2864, | |
| "grad_norm": 0.7821967005729675, | |
| "learning_rate": 0.00020999999999999998, | |
| "loss": 3.7205, | |
| "step": 179 | |
| }, | |
| { | |
| "epoch": 0.288, | |
| "grad_norm": 2.0641937255859375, | |
| "learning_rate": 0.00021119999999999996, | |
| "loss": 3.7218, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.2896, | |
| "grad_norm": 0.7465824484825134, | |
| "learning_rate": 0.00021239999999999996, | |
| "loss": 3.7159, | |
| "step": 181 | |
| }, | |
| { | |
| "epoch": 0.2912, | |
| "grad_norm": 1.9227252006530762, | |
| "learning_rate": 0.00021359999999999996, | |
| "loss": 3.7154, | |
| "step": 182 | |
| }, | |
| { | |
| "epoch": 0.2928, | |
| "grad_norm": 2.588310480117798, | |
| "learning_rate": 0.00021479999999999996, | |
| "loss": 3.7532, | |
| "step": 183 | |
| }, | |
| { | |
| "epoch": 0.2944, | |
| "grad_norm": 0.8672159314155579, | |
| "learning_rate": 0.00021599999999999996, | |
| "loss": 3.7199, | |
| "step": 184 | |
| }, | |
| { | |
| "epoch": 0.296, | |
| "grad_norm": 2.015648365020752, | |
| "learning_rate": 0.00021719999999999997, | |
| "loss": 3.7661, | |
| "step": 185 | |
| }, | |
| { | |
| "epoch": 0.2976, | |
| "grad_norm": 1.280931830406189, | |
| "learning_rate": 0.00021839999999999997, | |
| "loss": 3.7427, | |
| "step": 186 | |
| }, | |
| { | |
| "epoch": 0.2992, | |
| "grad_norm": 1.2730371952056885, | |
| "learning_rate": 0.00021959999999999997, | |
| "loss": 3.8019, | |
| "step": 187 | |
| }, | |
| { | |
| "epoch": 0.3008, | |
| "grad_norm": 0.9145507216453552, | |
| "learning_rate": 0.00022079999999999997, | |
| "loss": 3.7265, | |
| "step": 188 | |
| }, | |
| { | |
| "epoch": 0.3024, | |
| "grad_norm": 1.7408198118209839, | |
| "learning_rate": 0.00022199999999999998, | |
| "loss": 3.6761, | |
| "step": 189 | |
| }, | |
| { | |
| "epoch": 0.304, | |
| "grad_norm": 4.375244140625, | |
| "learning_rate": 0.00022319999999999998, | |
| "loss": 3.7797, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.3056, | |
| "grad_norm": 2.859325647354126, | |
| "learning_rate": 0.00022439999999999998, | |
| "loss": 3.7509, | |
| "step": 191 | |
| }, | |
| { | |
| "epoch": 0.3072, | |
| "grad_norm": 2.0287702083587646, | |
| "learning_rate": 0.00022559999999999998, | |
| "loss": 3.7396, | |
| "step": 192 | |
| }, | |
| { | |
| "epoch": 0.3088, | |
| "grad_norm": 0.7529137134552002, | |
| "learning_rate": 0.00022679999999999998, | |
| "loss": 3.7297, | |
| "step": 193 | |
| }, | |
| { | |
| "epoch": 0.3104, | |
| "grad_norm": 0.8018739223480225, | |
| "learning_rate": 0.00022799999999999999, | |
| "loss": 3.758, | |
| "step": 194 | |
| }, | |
| { | |
| "epoch": 0.312, | |
| "grad_norm": 1.8517733812332153, | |
| "learning_rate": 0.0002292, | |
| "loss": 3.7313, | |
| "step": 195 | |
| }, | |
| { | |
| "epoch": 0.3136, | |
| "grad_norm": 1.8108242750167847, | |
| "learning_rate": 0.0002304, | |
| "loss": 3.726, | |
| "step": 196 | |
| }, | |
| { | |
| "epoch": 0.3152, | |
| "grad_norm": 1.7063705921173096, | |
| "learning_rate": 0.0002316, | |
| "loss": 3.6929, | |
| "step": 197 | |
| }, | |
| { | |
| "epoch": 0.3168, | |
| "grad_norm": 1.4768927097320557, | |
| "learning_rate": 0.0002328, | |
| "loss": 3.7741, | |
| "step": 198 | |
| }, | |
| { | |
| "epoch": 0.3184, | |
| "grad_norm": 2.0207772254943848, | |
| "learning_rate": 0.000234, | |
| "loss": 3.8361, | |
| "step": 199 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "grad_norm": null, | |
| "learning_rate": 0.000234, | |
| "loss": 3.9766, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.3216, | |
| "grad_norm": 28.53923797607422, | |
| "learning_rate": 0.0002352, | |
| "loss": 4.9473, | |
| "step": 201 | |
| }, | |
| { | |
| "epoch": 0.3232, | |
| "grad_norm": 19.729700088500977, | |
| "learning_rate": 0.0002364, | |
| "loss": 3.8604, | |
| "step": 202 | |
| }, | |
| { | |
| "epoch": 0.3248, | |
| "grad_norm": 15.63243293762207, | |
| "learning_rate": 0.0002376, | |
| "loss": 4.1119, | |
| "step": 203 | |
| }, | |
| { | |
| "epoch": 0.3264, | |
| "grad_norm": 4.515649795532227, | |
| "learning_rate": 0.0002388, | |
| "loss": 3.7548, | |
| "step": 204 | |
| }, | |
| { | |
| "epoch": 0.328, | |
| "grad_norm": 3.835597038269043, | |
| "learning_rate": 0.00023999999999999998, | |
| "loss": 3.7632, | |
| "step": 205 | |
| }, | |
| { | |
| "epoch": 0.3296, | |
| "grad_norm": 3.379566192626953, | |
| "learning_rate": 0.00024119999999999998, | |
| "loss": 3.9366, | |
| "step": 206 | |
| }, | |
| { | |
| "epoch": 0.3312, | |
| "grad_norm": 1.9478070735931396, | |
| "learning_rate": 0.00024239999999999998, | |
| "loss": 3.7263, | |
| "step": 207 | |
| }, | |
| { | |
| "epoch": 0.3328, | |
| "grad_norm": 2.06083083152771, | |
| "learning_rate": 0.00024359999999999999, | |
| "loss": 3.6968, | |
| "step": 208 | |
| }, | |
| { | |
| "epoch": 0.3344, | |
| "grad_norm": 3.923818826675415, | |
| "learning_rate": 0.0002448, | |
| "loss": 3.7776, | |
| "step": 209 | |
| }, | |
| { | |
| "epoch": 0.336, | |
| "grad_norm": 3.4094479084014893, | |
| "learning_rate": 0.00024599999999999996, | |
| "loss": 3.7088, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.3376, | |
| "grad_norm": 4.099003314971924, | |
| "learning_rate": 0.0002472, | |
| "loss": 3.7462, | |
| "step": 211 | |
| }, | |
| { | |
| "epoch": 0.3392, | |
| "grad_norm": 0.9599695205688477, | |
| "learning_rate": 0.00024839999999999997, | |
| "loss": 3.7051, | |
| "step": 212 | |
| }, | |
| { | |
| "epoch": 0.3408, | |
| "grad_norm": 3.5445761680603027, | |
| "learning_rate": 0.00024959999999999994, | |
| "loss": 3.6189, | |
| "step": 213 | |
| }, | |
| { | |
| "epoch": 0.3424, | |
| "grad_norm": 3.2550857067108154, | |
| "learning_rate": 0.00025079999999999997, | |
| "loss": 3.5834, | |
| "step": 214 | |
| }, | |
| { | |
| "epoch": 0.344, | |
| "grad_norm": 1.9503517150878906, | |
| "learning_rate": 0.00025199999999999995, | |
| "loss": 3.5801, | |
| "step": 215 | |
| }, | |
| { | |
| "epoch": 0.3456, | |
| "grad_norm": 4.3856520652771, | |
| "learning_rate": 0.0002532, | |
| "loss": 3.7169, | |
| "step": 216 | |
| }, | |
| { | |
| "epoch": 0.3472, | |
| "grad_norm": 1.5111596584320068, | |
| "learning_rate": 0.00025439999999999995, | |
| "loss": 3.5968, | |
| "step": 217 | |
| }, | |
| { | |
| "epoch": 0.3488, | |
| "grad_norm": 1.61884605884552, | |
| "learning_rate": 0.0002556, | |
| "loss": 3.5628, | |
| "step": 218 | |
| }, | |
| { | |
| "epoch": 0.3504, | |
| "grad_norm": 0.7278621792793274, | |
| "learning_rate": 0.00025679999999999995, | |
| "loss": 3.6218, | |
| "step": 219 | |
| }, | |
| { | |
| "epoch": 0.352, | |
| "grad_norm": 0.9155241847038269, | |
| "learning_rate": 0.000258, | |
| "loss": 3.4842, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.3536, | |
| "grad_norm": 1.681706428527832, | |
| "learning_rate": 0.00025919999999999996, | |
| "loss": 3.4532, | |
| "step": 221 | |
| }, | |
| { | |
| "epoch": 0.3552, | |
| "grad_norm": 2.0195343494415283, | |
| "learning_rate": 0.0002604, | |
| "loss": 3.529, | |
| "step": 222 | |
| }, | |
| { | |
| "epoch": 0.3568, | |
| "grad_norm": 2.215996026992798, | |
| "learning_rate": 0.00026159999999999996, | |
| "loss": 3.5834, | |
| "step": 223 | |
| }, | |
| { | |
| "epoch": 0.3584, | |
| "grad_norm": 3.6210215091705322, | |
| "learning_rate": 0.0002628, | |
| "loss": 3.5076, | |
| "step": 224 | |
| }, | |
| { | |
| "epoch": 0.36, | |
| "grad_norm": 3.3881304264068604, | |
| "learning_rate": 0.00026399999999999997, | |
| "loss": 3.5052, | |
| "step": 225 | |
| }, | |
| { | |
| "epoch": 0.3616, | |
| "grad_norm": 1.2712512016296387, | |
| "learning_rate": 0.0002652, | |
| "loss": 3.4167, | |
| "step": 226 | |
| }, | |
| { | |
| "epoch": 0.3632, | |
| "grad_norm": 2.784555435180664, | |
| "learning_rate": 0.00026639999999999997, | |
| "loss": 3.4773, | |
| "step": 227 | |
| }, | |
| { | |
| "epoch": 0.3648, | |
| "grad_norm": 3.9173593521118164, | |
| "learning_rate": 0.0002676, | |
| "loss": 3.4792, | |
| "step": 228 | |
| }, | |
| { | |
| "epoch": 0.3664, | |
| "grad_norm": 5.97532844543457, | |
| "learning_rate": 0.0002688, | |
| "loss": 3.581, | |
| "step": 229 | |
| }, | |
| { | |
| "epoch": 0.368, | |
| "grad_norm": 1.1036829948425293, | |
| "learning_rate": 0.00027, | |
| "loss": 3.4026, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.3696, | |
| "grad_norm": 2.0570623874664307, | |
| "learning_rate": 0.0002712, | |
| "loss": 3.3855, | |
| "step": 231 | |
| }, | |
| { | |
| "epoch": 0.3712, | |
| "grad_norm": 2.33469295501709, | |
| "learning_rate": 0.0002724, | |
| "loss": 3.3493, | |
| "step": 232 | |
| }, | |
| { | |
| "epoch": 0.3728, | |
| "grad_norm": 1.7420787811279297, | |
| "learning_rate": 0.0002736, | |
| "loss": 3.4412, | |
| "step": 233 | |
| }, | |
| { | |
| "epoch": 0.3744, | |
| "grad_norm": 1.597578525543213, | |
| "learning_rate": 0.0002748, | |
| "loss": 3.4772, | |
| "step": 234 | |
| }, | |
| { | |
| "epoch": 0.376, | |
| "grad_norm": 1.469741702079773, | |
| "learning_rate": 0.000276, | |
| "loss": 3.2345, | |
| "step": 235 | |
| }, | |
| { | |
| "epoch": 0.3776, | |
| "grad_norm": 1.8439031839370728, | |
| "learning_rate": 0.0002772, | |
| "loss": 3.2449, | |
| "step": 236 | |
| }, | |
| { | |
| "epoch": 0.3792, | |
| "grad_norm": 1.8763359785079956, | |
| "learning_rate": 0.0002784, | |
| "loss": 3.0865, | |
| "step": 237 | |
| }, | |
| { | |
| "epoch": 0.3808, | |
| "grad_norm": 1.3372673988342285, | |
| "learning_rate": 0.00027959999999999997, | |
| "loss": 3.2648, | |
| "step": 238 | |
| }, | |
| { | |
| "epoch": 0.3824, | |
| "grad_norm": 1.0363004207611084, | |
| "learning_rate": 0.0002808, | |
| "loss": 3.1095, | |
| "step": 239 | |
| }, | |
| { | |
| "epoch": 0.384, | |
| "grad_norm": 1.2411675453186035, | |
| "learning_rate": 0.00028199999999999997, | |
| "loss": 3.1516, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.3856, | |
| "grad_norm": 1.1692326068878174, | |
| "learning_rate": 0.00028319999999999994, | |
| "loss": 3.1162, | |
| "step": 241 | |
| }, | |
| { | |
| "epoch": 0.3872, | |
| "grad_norm": 1.514488697052002, | |
| "learning_rate": 0.0002844, | |
| "loss": 3.1885, | |
| "step": 242 | |
| }, | |
| { | |
| "epoch": 0.3888, | |
| "grad_norm": 1.0548737049102783, | |
| "learning_rate": 0.00028559999999999995, | |
| "loss": 2.9995, | |
| "step": 243 | |
| }, | |
| { | |
| "epoch": 0.3904, | |
| "grad_norm": 1.435293436050415, | |
| "learning_rate": 0.0002868, | |
| "loss": 3.1228, | |
| "step": 244 | |
| }, | |
| { | |
| "epoch": 0.392, | |
| "grad_norm": 4.880733966827393, | |
| "learning_rate": 0.00028799999999999995, | |
| "loss": 3.523, | |
| "step": 245 | |
| }, | |
| { | |
| "epoch": 0.3936, | |
| "grad_norm": 2.134737730026245, | |
| "learning_rate": 0.0002892, | |
| "loss": 2.9433, | |
| "step": 246 | |
| }, | |
| { | |
| "epoch": 0.3952, | |
| "grad_norm": 1.4167667627334595, | |
| "learning_rate": 0.00029039999999999996, | |
| "loss": 3.0941, | |
| "step": 247 | |
| }, | |
| { | |
| "epoch": 0.3968, | |
| "grad_norm": 1.7885382175445557, | |
| "learning_rate": 0.0002916, | |
| "loss": 3.076, | |
| "step": 248 | |
| }, | |
| { | |
| "epoch": 0.3984, | |
| "grad_norm": 2.0048017501831055, | |
| "learning_rate": 0.00029279999999999996, | |
| "loss": 3.1318, | |
| "step": 249 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "grad_norm": 2.751702070236206, | |
| "learning_rate": 0.000294, | |
| "loss": 3.437, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.4016, | |
| "grad_norm": 60.15131759643555, | |
| "learning_rate": 0.00029519999999999997, | |
| "loss": 6.7094, | |
| "step": 251 | |
| }, | |
| { | |
| "epoch": 0.4032, | |
| "grad_norm": 11.904341697692871, | |
| "learning_rate": 0.0002964, | |
| "loss": 3.3368, | |
| "step": 252 | |
| }, | |
| { | |
| "epoch": 0.4048, | |
| "grad_norm": 5.927310943603516, | |
| "learning_rate": 0.00029759999999999997, | |
| "loss": 3.084, | |
| "step": 253 | |
| }, | |
| { | |
| "epoch": 0.4064, | |
| "grad_norm": 1.259347915649414, | |
| "learning_rate": 0.0002988, | |
| "loss": 2.8328, | |
| "step": 254 | |
| }, | |
| { | |
| "epoch": 0.408, | |
| "grad_norm": 2.5598220825195312, | |
| "learning_rate": 0.0003, | |
| "loss": 2.9085, | |
| "step": 255 | |
| }, | |
| { | |
| "epoch": 0.4096, | |
| "grad_norm": 1.866328239440918, | |
| "learning_rate": 0.00030119999999999995, | |
| "loss": 2.8637, | |
| "step": 256 | |
| }, | |
| { | |
| "epoch": 0.4112, | |
| "grad_norm": 1.2088593244552612, | |
| "learning_rate": 0.0003024, | |
| "loss": 2.6899, | |
| "step": 257 | |
| }, | |
| { | |
| "epoch": 0.4128, | |
| "grad_norm": 4.605785369873047, | |
| "learning_rate": 0.00030359999999999995, | |
| "loss": 2.9239, | |
| "step": 258 | |
| }, | |
| { | |
| "epoch": 0.4144, | |
| "grad_norm": 1.7429507970809937, | |
| "learning_rate": 0.0003048, | |
| "loss": 2.5301, | |
| "step": 259 | |
| }, | |
| { | |
| "epoch": 0.416, | |
| "grad_norm": 2.9041364192962646, | |
| "learning_rate": 0.00030599999999999996, | |
| "loss": 2.4356, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.4176, | |
| "grad_norm": 2.5931711196899414, | |
| "learning_rate": 0.0003072, | |
| "loss": 2.523, | |
| "step": 261 | |
| }, | |
| { | |
| "epoch": 0.4192, | |
| "grad_norm": 1.1678705215454102, | |
| "learning_rate": 0.00030839999999999996, | |
| "loss": 2.2468, | |
| "step": 262 | |
| }, | |
| { | |
| "epoch": 0.4208, | |
| "grad_norm": 1.711000680923462, | |
| "learning_rate": 0.0003096, | |
| "loss": 2.5393, | |
| "step": 263 | |
| }, | |
| { | |
| "epoch": 0.4224, | |
| "grad_norm": 1.8182960748672485, | |
| "learning_rate": 0.00031079999999999997, | |
| "loss": 2.4138, | |
| "step": 264 | |
| }, | |
| { | |
| "epoch": 0.424, | |
| "grad_norm": 1.3307945728302002, | |
| "learning_rate": 0.000312, | |
| "loss": 2.2623, | |
| "step": 265 | |
| }, | |
| { | |
| "epoch": 0.4256, | |
| "grad_norm": 1.9444172382354736, | |
| "learning_rate": 0.00031319999999999997, | |
| "loss": 2.1209, | |
| "step": 266 | |
| }, | |
| { | |
| "epoch": 0.4272, | |
| "grad_norm": 2.1593079566955566, | |
| "learning_rate": 0.0003144, | |
| "loss": 1.9432, | |
| "step": 267 | |
| }, | |
| { | |
| "epoch": 0.4288, | |
| "grad_norm": 2.1220879554748535, | |
| "learning_rate": 0.0003156, | |
| "loss": 2.1439, | |
| "step": 268 | |
| }, | |
| { | |
| "epoch": 0.4304, | |
| "grad_norm": 0.8869176506996155, | |
| "learning_rate": 0.0003168, | |
| "loss": 2.1583, | |
| "step": 269 | |
| }, | |
| { | |
| "epoch": 0.432, | |
| "grad_norm": 1.2422492504119873, | |
| "learning_rate": 0.000318, | |
| "loss": 1.8846, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.4336, | |
| "grad_norm": 0.8885542154312134, | |
| "learning_rate": 0.0003192, | |
| "loss": 1.9293, | |
| "step": 271 | |
| }, | |
| { | |
| "epoch": 0.4352, | |
| "grad_norm": 1.3430697917938232, | |
| "learning_rate": 0.0003204, | |
| "loss": 2.1379, | |
| "step": 272 | |
| }, | |
| { | |
| "epoch": 0.4368, | |
| "grad_norm": 2.2559776306152344, | |
| "learning_rate": 0.0003216, | |
| "loss": 2.311, | |
| "step": 273 | |
| }, | |
| { | |
| "epoch": 0.4384, | |
| "grad_norm": 1.0061554908752441, | |
| "learning_rate": 0.0003228, | |
| "loss": 1.8522, | |
| "step": 274 | |
| }, | |
| { | |
| "epoch": 0.44, | |
| "grad_norm": 0.9519514441490173, | |
| "learning_rate": 0.000324, | |
| "loss": 1.9418, | |
| "step": 275 | |
| }, | |
| { | |
| "epoch": 0.4416, | |
| "grad_norm": 1.034746527671814, | |
| "learning_rate": 0.0003252, | |
| "loss": 2.1713, | |
| "step": 276 | |
| }, | |
| { | |
| "epoch": 0.4432, | |
| "grad_norm": 1.4670822620391846, | |
| "learning_rate": 0.0003264, | |
| "loss": 2.2635, | |
| "step": 277 | |
| }, | |
| { | |
| "epoch": 0.4448, | |
| "grad_norm": 1.1949667930603027, | |
| "learning_rate": 0.0003276, | |
| "loss": 2.447, | |
| "step": 278 | |
| }, | |
| { | |
| "epoch": 0.4464, | |
| "grad_norm": 2.0934438705444336, | |
| "learning_rate": 0.0003288, | |
| "loss": 1.9578, | |
| "step": 279 | |
| }, | |
| { | |
| "epoch": 0.448, | |
| "grad_norm": 1.7934505939483643, | |
| "learning_rate": 0.00033, | |
| "loss": 2.3822, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.4496, | |
| "grad_norm": 1.0247673988342285, | |
| "learning_rate": 0.0003312, | |
| "loss": 1.8567, | |
| "step": 281 | |
| }, | |
| { | |
| "epoch": 0.4512, | |
| "grad_norm": 2.136781692504883, | |
| "learning_rate": 0.0003324, | |
| "loss": 1.964, | |
| "step": 282 | |
| }, | |
| { | |
| "epoch": 0.4528, | |
| "grad_norm": 0.8177749514579773, | |
| "learning_rate": 0.0003336, | |
| "loss": 1.8132, | |
| "step": 283 | |
| }, | |
| { | |
| "epoch": 0.4544, | |
| "grad_norm": 1.0221589803695679, | |
| "learning_rate": 0.0003348, | |
| "loss": 1.8668, | |
| "step": 284 | |
| }, | |
| { | |
| "epoch": 0.456, | |
| "grad_norm": 1.517808198928833, | |
| "learning_rate": 0.000336, | |
| "loss": 2.1496, | |
| "step": 285 | |
| }, | |
| { | |
| "epoch": 0.4576, | |
| "grad_norm": 1.5944926738739014, | |
| "learning_rate": 0.0003372, | |
| "loss": 2.3953, | |
| "step": 286 | |
| }, | |
| { | |
| "epoch": 0.4592, | |
| "grad_norm": 2.405046224594116, | |
| "learning_rate": 0.00033839999999999993, | |
| "loss": 1.9663, | |
| "step": 287 | |
| }, | |
| { | |
| "epoch": 0.4608, | |
| "grad_norm": 1.6332200765609741, | |
| "learning_rate": 0.00033959999999999996, | |
| "loss": 1.7194, | |
| "step": 288 | |
| }, | |
| { | |
| "epoch": 0.4624, | |
| "grad_norm": 1.463877558708191, | |
| "learning_rate": 0.00034079999999999994, | |
| "loss": 1.9922, | |
| "step": 289 | |
| }, | |
| { | |
| "epoch": 0.464, | |
| "grad_norm": 1.3162888288497925, | |
| "learning_rate": 0.00034199999999999996, | |
| "loss": 1.9758, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.4656, | |
| "grad_norm": 1.7767980098724365, | |
| "learning_rate": 0.00034319999999999994, | |
| "loss": 2.2791, | |
| "step": 291 | |
| }, | |
| { | |
| "epoch": 0.4672, | |
| "grad_norm": 1.7777577638626099, | |
| "learning_rate": 0.00034439999999999997, | |
| "loss": 1.5986, | |
| "step": 292 | |
| }, | |
| { | |
| "epoch": 0.4688, | |
| "grad_norm": 1.4968204498291016, | |
| "learning_rate": 0.00034559999999999994, | |
| "loss": 1.7837, | |
| "step": 293 | |
| }, | |
| { | |
| "epoch": 0.4704, | |
| "grad_norm": 1.639837384223938, | |
| "learning_rate": 0.0003467999999999999, | |
| "loss": 2.1542, | |
| "step": 294 | |
| }, | |
| { | |
| "epoch": 0.472, | |
| "grad_norm": 2.0573577880859375, | |
| "learning_rate": 0.00034799999999999995, | |
| "loss": 2.1574, | |
| "step": 295 | |
| }, | |
| { | |
| "epoch": 0.4736, | |
| "grad_norm": 2.6885788440704346, | |
| "learning_rate": 0.0003491999999999999, | |
| "loss": 2.0754, | |
| "step": 296 | |
| }, | |
| { | |
| "epoch": 0.4752, | |
| "grad_norm": 2.3689215183258057, | |
| "learning_rate": 0.00035039999999999995, | |
| "loss": 2.3191, | |
| "step": 297 | |
| }, | |
| { | |
| "epoch": 0.4768, | |
| "grad_norm": 1.4669629335403442, | |
| "learning_rate": 0.0003515999999999999, | |
| "loss": 2.1903, | |
| "step": 298 | |
| }, | |
| { | |
| "epoch": 0.4784, | |
| "grad_norm": 6.631944179534912, | |
| "learning_rate": 0.00035279999999999996, | |
| "loss": 2.4098, | |
| "step": 299 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "grad_norm": 3.869600296020508, | |
| "learning_rate": 0.00035399999999999993, | |
| "loss": 2.8155, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.4816, | |
| "grad_norm": 18.670400619506836, | |
| "learning_rate": 0.00035519999999999996, | |
| "loss": 4.0821, | |
| "step": 301 | |
| }, | |
| { | |
| "epoch": 0.4832, | |
| "grad_norm": 6.5373640060424805, | |
| "learning_rate": 0.00035639999999999994, | |
| "loss": 2.6903, | |
| "step": 302 | |
| }, | |
| { | |
| "epoch": 0.4848, | |
| "grad_norm": 1.8103080987930298, | |
| "learning_rate": 0.00035759999999999996, | |
| "loss": 1.715, | |
| "step": 303 | |
| }, | |
| { | |
| "epoch": 0.4864, | |
| "grad_norm": 2.0340678691864014, | |
| "learning_rate": 0.00035879999999999994, | |
| "loss": 2.2299, | |
| "step": 304 | |
| }, | |
| { | |
| "epoch": 0.488, | |
| "grad_norm": 2.8201074600219727, | |
| "learning_rate": 0.00035999999999999997, | |
| "loss": 2.0269, | |
| "step": 305 | |
| }, | |
| { | |
| "epoch": 0.4896, | |
| "grad_norm": 3.9219393730163574, | |
| "learning_rate": 0.00036119999999999994, | |
| "loss": 2.0192, | |
| "step": 306 | |
| }, | |
| { | |
| "epoch": 0.4912, | |
| "grad_norm": 2.1272470951080322, | |
| "learning_rate": 0.00036239999999999997, | |
| "loss": 2.2065, | |
| "step": 307 | |
| }, | |
| { | |
| "epoch": 0.4928, | |
| "grad_norm": 1.2031842470169067, | |
| "learning_rate": 0.00036359999999999995, | |
| "loss": 1.6669, | |
| "step": 308 | |
| }, | |
| { | |
| "epoch": 0.4944, | |
| "grad_norm": 2.6753766536712646, | |
| "learning_rate": 0.0003648, | |
| "loss": 1.9611, | |
| "step": 309 | |
| }, | |
| { | |
| "epoch": 0.496, | |
| "grad_norm": 3.980130672454834, | |
| "learning_rate": 0.00036599999999999995, | |
| "loss": 1.8961, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.4976, | |
| "grad_norm": 3.4683375358581543, | |
| "learning_rate": 0.0003672, | |
| "loss": 1.6241, | |
| "step": 311 | |
| }, | |
| { | |
| "epoch": 0.4992, | |
| "grad_norm": 3.478597402572632, | |
| "learning_rate": 0.00036839999999999996, | |
| "loss": 1.8472, | |
| "step": 312 | |
| }, | |
| { | |
| "epoch": 0.5008, | |
| "grad_norm": 1.360845923423767, | |
| "learning_rate": 0.0003696, | |
| "loss": 1.7252, | |
| "step": 313 | |
| }, | |
| { | |
| "epoch": 0.5024, | |
| "grad_norm": 0.9226222634315491, | |
| "learning_rate": 0.00037079999999999996, | |
| "loss": 1.4792, | |
| "step": 314 | |
| }, | |
| { | |
| "epoch": 0.504, | |
| "grad_norm": 1.2864586114883423, | |
| "learning_rate": 0.000372, | |
| "loss": 1.5637, | |
| "step": 315 | |
| }, | |
| { | |
| "epoch": 0.5056, | |
| "grad_norm": 1.395561695098877, | |
| "learning_rate": 0.00037319999999999996, | |
| "loss": 1.41, | |
| "step": 316 | |
| }, | |
| { | |
| "epoch": 0.5072, | |
| "grad_norm": 1.9199680089950562, | |
| "learning_rate": 0.0003744, | |
| "loss": 1.8221, | |
| "step": 317 | |
| }, | |
| { | |
| "epoch": 0.5088, | |
| "grad_norm": 1.2985516786575317, | |
| "learning_rate": 0.00037559999999999997, | |
| "loss": 1.9545, | |
| "step": 318 | |
| }, | |
| { | |
| "epoch": 0.5104, | |
| "grad_norm": 0.861045777797699, | |
| "learning_rate": 0.00037679999999999994, | |
| "loss": 1.2, | |
| "step": 319 | |
| }, | |
| { | |
| "epoch": 0.512, | |
| "grad_norm": 1.4724054336547852, | |
| "learning_rate": 0.00037799999999999997, | |
| "loss": 1.9655, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.5136, | |
| "grad_norm": 1.121520757675171, | |
| "learning_rate": 0.00037919999999999995, | |
| "loss": 1.6427, | |
| "step": 321 | |
| }, | |
| { | |
| "epoch": 0.5152, | |
| "grad_norm": 0.8945459127426147, | |
| "learning_rate": 0.0003804, | |
| "loss": 1.8193, | |
| "step": 322 | |
| }, | |
| { | |
| "epoch": 0.5168, | |
| "grad_norm": 1.510231614112854, | |
| "learning_rate": 0.00038159999999999995, | |
| "loss": 1.7606, | |
| "step": 323 | |
| }, | |
| { | |
| "epoch": 0.5184, | |
| "grad_norm": 1.2624521255493164, | |
| "learning_rate": 0.0003828, | |
| "loss": 1.8403, | |
| "step": 324 | |
| }, | |
| { | |
| "epoch": 0.52, | |
| "grad_norm": 1.1738777160644531, | |
| "learning_rate": 0.00038399999999999996, | |
| "loss": 1.5676, | |
| "step": 325 | |
| }, | |
| { | |
| "epoch": 0.5216, | |
| "grad_norm": 1.4238841533660889, | |
| "learning_rate": 0.0003852, | |
| "loss": 1.3816, | |
| "step": 326 | |
| }, | |
| { | |
| "epoch": 0.5232, | |
| "grad_norm": 0.9597876667976379, | |
| "learning_rate": 0.00038639999999999996, | |
| "loss": 1.4629, | |
| "step": 327 | |
| }, | |
| { | |
| "epoch": 0.5248, | |
| "grad_norm": 1.1177942752838135, | |
| "learning_rate": 0.0003876, | |
| "loss": 1.4452, | |
| "step": 328 | |
| }, | |
| { | |
| "epoch": 0.5264, | |
| "grad_norm": 1.0831985473632812, | |
| "learning_rate": 0.00038879999999999996, | |
| "loss": 1.6668, | |
| "step": 329 | |
| }, | |
| { | |
| "epoch": 0.528, | |
| "grad_norm": 1.1985876560211182, | |
| "learning_rate": 0.00039, | |
| "loss": 1.5963, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.5296, | |
| "grad_norm": 1.4813580513000488, | |
| "learning_rate": 0.00039119999999999997, | |
| "loss": 1.6768, | |
| "step": 331 | |
| }, | |
| { | |
| "epoch": 0.5312, | |
| "grad_norm": 1.4565857648849487, | |
| "learning_rate": 0.0003924, | |
| "loss": 1.6633, | |
| "step": 332 | |
| }, | |
| { | |
| "epoch": 0.5328, | |
| "grad_norm": 1.1487796306610107, | |
| "learning_rate": 0.00039359999999999997, | |
| "loss": 1.4567, | |
| "step": 333 | |
| }, | |
| { | |
| "epoch": 0.5344, | |
| "grad_norm": 1.2718472480773926, | |
| "learning_rate": 0.0003948, | |
| "loss": 1.8923, | |
| "step": 334 | |
| }, | |
| { | |
| "epoch": 0.536, | |
| "grad_norm": 1.393419623374939, | |
| "learning_rate": 0.000396, | |
| "loss": 1.1306, | |
| "step": 335 | |
| }, | |
| { | |
| "epoch": 0.5376, | |
| "grad_norm": 2.660311222076416, | |
| "learning_rate": 0.0003972, | |
| "loss": 1.6447, | |
| "step": 336 | |
| }, | |
| { | |
| "epoch": 0.5392, | |
| "grad_norm": 1.1137669086456299, | |
| "learning_rate": 0.0003984, | |
| "loss": 1.6307, | |
| "step": 337 | |
| }, | |
| { | |
| "epoch": 0.5408, | |
| "grad_norm": 1.1761451959609985, | |
| "learning_rate": 0.0003996, | |
| "loss": 1.6008, | |
| "step": 338 | |
| }, | |
| { | |
| "epoch": 0.5424, | |
| "grad_norm": 1.350234866142273, | |
| "learning_rate": 0.0004008, | |
| "loss": 1.523, | |
| "step": 339 | |
| }, | |
| { | |
| "epoch": 0.544, | |
| "grad_norm": 1.166507601737976, | |
| "learning_rate": 0.000402, | |
| "loss": 1.2526, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.5456, | |
| "grad_norm": 1.3881250619888306, | |
| "learning_rate": 0.0004032, | |
| "loss": 1.7568, | |
| "step": 341 | |
| }, | |
| { | |
| "epoch": 0.5472, | |
| "grad_norm": 3.9268593788146973, | |
| "learning_rate": 0.0004044, | |
| "loss": 2.0863, | |
| "step": 342 | |
| }, | |
| { | |
| "epoch": 0.5488, | |
| "grad_norm": 1.7067468166351318, | |
| "learning_rate": 0.0004056, | |
| "loss": 1.5831, | |
| "step": 343 | |
| }, | |
| { | |
| "epoch": 0.5504, | |
| "grad_norm": 1.4713155031204224, | |
| "learning_rate": 0.00040679999999999997, | |
| "loss": 1.7429, | |
| "step": 344 | |
| }, | |
| { | |
| "epoch": 0.552, | |
| "grad_norm": 1.212177038192749, | |
| "learning_rate": 0.000408, | |
| "loss": 1.6922, | |
| "step": 345 | |
| }, | |
| { | |
| "epoch": 0.5536, | |
| "grad_norm": 1.6186903715133667, | |
| "learning_rate": 0.00040919999999999997, | |
| "loss": 1.818, | |
| "step": 346 | |
| }, | |
| { | |
| "epoch": 0.5552, | |
| "grad_norm": 2.359452247619629, | |
| "learning_rate": 0.0004104, | |
| "loss": 2.4012, | |
| "step": 347 | |
| }, | |
| { | |
| "epoch": 0.5568, | |
| "grad_norm": 2.3700032234191895, | |
| "learning_rate": 0.0004116, | |
| "loss": 2.296, | |
| "step": 348 | |
| }, | |
| { | |
| "epoch": 0.5584, | |
| "grad_norm": 1.8284653425216675, | |
| "learning_rate": 0.00041279999999999995, | |
| "loss": 2.0039, | |
| "step": 349 | |
| }, | |
| { | |
| "epoch": 0.56, | |
| "grad_norm": 2.491885185241699, | |
| "learning_rate": 0.0004139999999999999, | |
| "loss": 2.089, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.5616, | |
| "grad_norm": 2.3013062477111816, | |
| "learning_rate": 0.00041519999999999995, | |
| "loss": 1.6711, | |
| "step": 351 | |
| }, | |
| { | |
| "epoch": 0.5632, | |
| "grad_norm": 1.9905917644500732, | |
| "learning_rate": 0.00041639999999999993, | |
| "loss": 1.6065, | |
| "step": 352 | |
| }, | |
| { | |
| "epoch": 0.5648, | |
| "grad_norm": 1.480592966079712, | |
| "learning_rate": 0.00041759999999999996, | |
| "loss": 2.2154, | |
| "step": 353 | |
| }, | |
| { | |
| "epoch": 0.5664, | |
| "grad_norm": 2.0091054439544678, | |
| "learning_rate": 0.00041879999999999993, | |
| "loss": 1.9469, | |
| "step": 354 | |
| }, | |
| { | |
| "epoch": 0.568, | |
| "grad_norm": 4.029331207275391, | |
| "learning_rate": 0.00041999999999999996, | |
| "loss": 2.159, | |
| "step": 355 | |
| }, | |
| { | |
| "epoch": 0.5696, | |
| "grad_norm": 3.835162401199341, | |
| "learning_rate": 0.00042119999999999994, | |
| "loss": 1.9669, | |
| "step": 356 | |
| }, | |
| { | |
| "epoch": 0.5712, | |
| "grad_norm": 3.283651828765869, | |
| "learning_rate": 0.0004223999999999999, | |
| "loss": 1.9169, | |
| "step": 357 | |
| }, | |
| { | |
| "epoch": 0.5728, | |
| "grad_norm": 2.7341041564941406, | |
| "learning_rate": 0.00042359999999999994, | |
| "loss": 1.9569, | |
| "step": 358 | |
| }, | |
| { | |
| "epoch": 0.5744, | |
| "grad_norm": 3.72660231590271, | |
| "learning_rate": 0.0004247999999999999, | |
| "loss": 1.9345, | |
| "step": 359 | |
| }, | |
| { | |
| "epoch": 0.576, | |
| "grad_norm": 1.2118239402770996, | |
| "learning_rate": 0.00042599999999999995, | |
| "loss": 1.6209, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.5776, | |
| "grad_norm": 2.773261547088623, | |
| "learning_rate": 0.0004271999999999999, | |
| "loss": 1.7308, | |
| "step": 361 | |
| }, | |
| { | |
| "epoch": 0.5792, | |
| "grad_norm": 1.5158611536026, | |
| "learning_rate": 0.00042839999999999995, | |
| "loss": 1.5566, | |
| "step": 362 | |
| }, | |
| { | |
| "epoch": 0.5808, | |
| "grad_norm": 1.7444158792495728, | |
| "learning_rate": 0.0004295999999999999, | |
| "loss": 1.517, | |
| "step": 363 | |
| }, | |
| { | |
| "epoch": 0.5824, | |
| "grad_norm": 2.0275840759277344, | |
| "learning_rate": 0.00043079999999999995, | |
| "loss": 1.3961, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 0.584, | |
| "grad_norm": 1.0864077806472778, | |
| "learning_rate": 0.00043199999999999993, | |
| "loss": 1.3833, | |
| "step": 365 | |
| }, | |
| { | |
| "epoch": 0.5856, | |
| "grad_norm": 1.5802364349365234, | |
| "learning_rate": 0.00043319999999999996, | |
| "loss": 1.2144, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 0.5872, | |
| "grad_norm": 1.1365376710891724, | |
| "learning_rate": 0.00043439999999999993, | |
| "loss": 1.4435, | |
| "step": 367 | |
| }, | |
| { | |
| "epoch": 0.5888, | |
| "grad_norm": 2.001816511154175, | |
| "learning_rate": 0.00043559999999999996, | |
| "loss": 1.8125, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 0.5904, | |
| "grad_norm": 3.9782650470733643, | |
| "learning_rate": 0.00043679999999999994, | |
| "loss": 2.3917, | |
| "step": 369 | |
| }, | |
| { | |
| "epoch": 0.592, | |
| "grad_norm": 2.559185743331909, | |
| "learning_rate": 0.00043799999999999997, | |
| "loss": 1.4065, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.5936, | |
| "grad_norm": 1.531436562538147, | |
| "learning_rate": 0.00043919999999999994, | |
| "loss": 1.3286, | |
| "step": 371 | |
| }, | |
| { | |
| "epoch": 0.5952, | |
| "grad_norm": 2.1857967376708984, | |
| "learning_rate": 0.00044039999999999997, | |
| "loss": 1.3389, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 0.5968, | |
| "grad_norm": 1.2437169551849365, | |
| "learning_rate": 0.00044159999999999995, | |
| "loss": 1.305, | |
| "step": 373 | |
| }, | |
| { | |
| "epoch": 0.5984, | |
| "grad_norm": 1.8238170146942139, | |
| "learning_rate": 0.0004428, | |
| "loss": 1.6173, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "grad_norm": 0.9433510303497314, | |
| "learning_rate": 0.00044399999999999995, | |
| "loss": 1.2818, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 0.6016, | |
| "grad_norm": 1.7307677268981934, | |
| "learning_rate": 0.0004452, | |
| "loss": 1.3214, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 0.6032, | |
| "grad_norm": 1.2876639366149902, | |
| "learning_rate": 0.00044639999999999995, | |
| "loss": 1.6527, | |
| "step": 377 | |
| }, | |
| { | |
| "epoch": 0.6048, | |
| "grad_norm": 1.0600067377090454, | |
| "learning_rate": 0.0004476, | |
| "loss": 1.4502, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 0.6064, | |
| "grad_norm": 1.1501784324645996, | |
| "learning_rate": 0.00044879999999999996, | |
| "loss": 1.2737, | |
| "step": 379 | |
| }, | |
| { | |
| "epoch": 0.608, | |
| "grad_norm": 0.959336519241333, | |
| "learning_rate": 0.00045, | |
| "loss": 1.7977, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.6096, | |
| "grad_norm": 1.1226460933685303, | |
| "learning_rate": 0.00045119999999999996, | |
| "loss": 1.4758, | |
| "step": 381 | |
| }, | |
| { | |
| "epoch": 0.6112, | |
| "grad_norm": 0.9224548935890198, | |
| "learning_rate": 0.00045239999999999994, | |
| "loss": 1.5619, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 0.6128, | |
| "grad_norm": 0.814278244972229, | |
| "learning_rate": 0.00045359999999999997, | |
| "loss": 1.3943, | |
| "step": 383 | |
| }, | |
| { | |
| "epoch": 0.6144, | |
| "grad_norm": 0.9084352850914001, | |
| "learning_rate": 0.00045479999999999994, | |
| "loss": 1.5962, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 0.616, | |
| "grad_norm": 1.4289610385894775, | |
| "learning_rate": 0.00045599999999999997, | |
| "loss": 1.3404, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 0.6176, | |
| "grad_norm": 1.2523407936096191, | |
| "learning_rate": 0.00045719999999999995, | |
| "loss": 1.4539, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 0.6192, | |
| "grad_norm": 1.1009465456008911, | |
| "learning_rate": 0.0004584, | |
| "loss": 1.2913, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 0.6208, | |
| "grad_norm": 1.369174838066101, | |
| "learning_rate": 0.00045959999999999995, | |
| "loss": 1.7786, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 0.6224, | |
| "grad_norm": 1.6469858884811401, | |
| "learning_rate": 0.0004608, | |
| "loss": 1.5913, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 0.624, | |
| "grad_norm": 1.0242924690246582, | |
| "learning_rate": 0.00046199999999999995, | |
| "loss": 1.4396, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.6256, | |
| "grad_norm": 1.0944545269012451, | |
| "learning_rate": 0.0004632, | |
| "loss": 1.3629, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 0.6272, | |
| "grad_norm": 1.2820014953613281, | |
| "learning_rate": 0.00046439999999999996, | |
| "loss": 1.4899, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 0.6288, | |
| "grad_norm": 1.1084744930267334, | |
| "learning_rate": 0.0004656, | |
| "loss": 1.3476, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 0.6304, | |
| "grad_norm": 1.2067919969558716, | |
| "learning_rate": 0.00046679999999999996, | |
| "loss": 1.6783, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 0.632, | |
| "grad_norm": 1.3350406885147095, | |
| "learning_rate": 0.000468, | |
| "loss": 1.8544, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.6336, | |
| "grad_norm": 2.817788600921631, | |
| "learning_rate": 0.00046919999999999997, | |
| "loss": 1.6983, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 0.6352, | |
| "grad_norm": 1.577996850013733, | |
| "learning_rate": 0.0004704, | |
| "loss": 1.9165, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 0.6368, | |
| "grad_norm": 1.992092251777649, | |
| "learning_rate": 0.00047159999999999997, | |
| "loss": 2.2309, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 0.6384, | |
| "grad_norm": 1.5959856510162354, | |
| "learning_rate": 0.0004728, | |
| "loss": 1.977, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 0.64, | |
| "grad_norm": 2.959681510925293, | |
| "learning_rate": 0.000474, | |
| "loss": 1.9346, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.6416, | |
| "grad_norm": 3.0177066326141357, | |
| "learning_rate": 0.0004752, | |
| "loss": 1.9946, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 0.6432, | |
| "grad_norm": 3.2861762046813965, | |
| "learning_rate": 0.0004764, | |
| "loss": 1.861, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 0.6448, | |
| "grad_norm": 33.552921295166016, | |
| "learning_rate": 0.0004776, | |
| "loss": 7.6541, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 0.6464, | |
| "grad_norm": 1.975786566734314, | |
| "learning_rate": 0.0004788, | |
| "loss": 1.7211, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 0.648, | |
| "grad_norm": 3.4860012531280518, | |
| "learning_rate": 0.00047999999999999996, | |
| "loss": 1.8209, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.6496, | |
| "grad_norm": 3.5102968215942383, | |
| "learning_rate": 0.0004812, | |
| "loss": 1.7063, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 0.6512, | |
| "grad_norm": 4.879241943359375, | |
| "learning_rate": 0.00048239999999999996, | |
| "loss": 1.4615, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 0.6528, | |
| "grad_norm": 2.7851948738098145, | |
| "learning_rate": 0.0004836, | |
| "loss": 1.6937, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 0.6544, | |
| "grad_norm": 0.9268562197685242, | |
| "learning_rate": 0.00048479999999999997, | |
| "loss": 1.6795, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 0.656, | |
| "grad_norm": 1.127549409866333, | |
| "learning_rate": 0.000486, | |
| "loss": 1.433, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.6576, | |
| "grad_norm": 2.1968774795532227, | |
| "learning_rate": 0.00048719999999999997, | |
| "loss": 1.501, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 0.6592, | |
| "grad_norm": 3.553457260131836, | |
| "learning_rate": 0.0004883999999999999, | |
| "loss": 1.4432, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 0.6608, | |
| "grad_norm": 1.8429263830184937, | |
| "learning_rate": 0.0004896, | |
| "loss": 1.3622, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 0.6624, | |
| "grad_norm": 0.8631522059440613, | |
| "learning_rate": 0.0004907999999999999, | |
| "loss": 1.157, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 0.664, | |
| "grad_norm": 0.8570797443389893, | |
| "learning_rate": 0.0004919999999999999, | |
| "loss": 1.03, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.6656, | |
| "grad_norm": 1.2855173349380493, | |
| "learning_rate": 0.0004932, | |
| "loss": 1.5622, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.6672, | |
| "grad_norm": 1.406083106994629, | |
| "learning_rate": 0.0004944, | |
| "loss": 1.0664, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 0.6688, | |
| "grad_norm": 1.5346801280975342, | |
| "learning_rate": 0.0004955999999999999, | |
| "loss": 1.5327, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 0.6704, | |
| "grad_norm": 1.3836517333984375, | |
| "learning_rate": 0.0004967999999999999, | |
| "loss": 1.6197, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 0.672, | |
| "grad_norm": 1.7024117708206177, | |
| "learning_rate": 0.000498, | |
| "loss": 1.2376, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.6736, | |
| "grad_norm": 1.5172358751296997, | |
| "learning_rate": 0.0004991999999999999, | |
| "loss": 1.5432, | |
| "step": 421 | |
| }, | |
| { | |
| "epoch": 0.6752, | |
| "grad_norm": 1.142734408378601, | |
| "learning_rate": 0.0005003999999999999, | |
| "loss": 1.2484, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 0.6768, | |
| "grad_norm": 0.8977586030960083, | |
| "learning_rate": 0.0005015999999999999, | |
| "loss": 1.3032, | |
| "step": 423 | |
| }, | |
| { | |
| "epoch": 0.6784, | |
| "grad_norm": 1.1880444288253784, | |
| "learning_rate": 0.0005028, | |
| "loss": 1.4404, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 0.68, | |
| "grad_norm": 1.214245080947876, | |
| "learning_rate": 0.0005039999999999999, | |
| "loss": 1.2976, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.6816, | |
| "grad_norm": 0.9443445801734924, | |
| "learning_rate": 0.0005051999999999999, | |
| "loss": 1.418, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 0.6832, | |
| "grad_norm": 1.4814517498016357, | |
| "learning_rate": 0.0005064, | |
| "loss": 1.3793, | |
| "step": 427 | |
| }, | |
| { | |
| "epoch": 0.6848, | |
| "grad_norm": 1.3838948011398315, | |
| "learning_rate": 0.0005076, | |
| "loss": 1.1823, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 0.6864, | |
| "grad_norm": 0.9880338311195374, | |
| "learning_rate": 0.0005087999999999999, | |
| "loss": 1.108, | |
| "step": 429 | |
| }, | |
| { | |
| "epoch": 0.688, | |
| "grad_norm": 1.0871669054031372, | |
| "learning_rate": 0.0005099999999999999, | |
| "loss": 1.3633, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.6896, | |
| "grad_norm": 1.2696417570114136, | |
| "learning_rate": 0.0005112, | |
| "loss": 1.3324, | |
| "step": 431 | |
| }, | |
| { | |
| "epoch": 0.6912, | |
| "grad_norm": 1.4589694738388062, | |
| "learning_rate": 0.0005124, | |
| "loss": 0.9689, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 0.6928, | |
| "grad_norm": 0.948417603969574, | |
| "learning_rate": 0.0005135999999999999, | |
| "loss": 1.2153, | |
| "step": 433 | |
| }, | |
| { | |
| "epoch": 0.6944, | |
| "grad_norm": 1.4363794326782227, | |
| "learning_rate": 0.0005147999999999999, | |
| "loss": 1.7676, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 0.696, | |
| "grad_norm": 0.8274084329605103, | |
| "learning_rate": 0.000516, | |
| "loss": 1.247, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 0.6976, | |
| "grad_norm": 1.2022773027420044, | |
| "learning_rate": 0.0005172, | |
| "loss": 1.9134, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 0.6992, | |
| "grad_norm": 1.0256644487380981, | |
| "learning_rate": 0.0005183999999999999, | |
| "loss": 1.5249, | |
| "step": 437 | |
| }, | |
| { | |
| "epoch": 0.7008, | |
| "grad_norm": 1.2487961053848267, | |
| "learning_rate": 0.0005195999999999999, | |
| "loss": 1.4572, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 0.7024, | |
| "grad_norm": 1.103989601135254, | |
| "learning_rate": 0.0005208, | |
| "loss": 1.2583, | |
| "step": 439 | |
| }, | |
| { | |
| "epoch": 0.704, | |
| "grad_norm": 1.1899611949920654, | |
| "learning_rate": 0.000522, | |
| "loss": 1.249, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.7056, | |
| "grad_norm": 1.1985859870910645, | |
| "learning_rate": 0.0005231999999999999, | |
| "loss": 1.3384, | |
| "step": 441 | |
| }, | |
| { | |
| "epoch": 0.7072, | |
| "grad_norm": 1.278523325920105, | |
| "learning_rate": 0.0005244, | |
| "loss": 1.6037, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 0.7088, | |
| "grad_norm": 2.5164453983306885, | |
| "learning_rate": 0.0005256, | |
| "loss": 1.7037, | |
| "step": 443 | |
| }, | |
| { | |
| "epoch": 0.7104, | |
| "grad_norm": 1.4125896692276, | |
| "learning_rate": 0.0005267999999999999, | |
| "loss": 1.3628, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 0.712, | |
| "grad_norm": 1.4269789457321167, | |
| "learning_rate": 0.0005279999999999999, | |
| "loss": 1.8712, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 0.7136, | |
| "grad_norm": 1.321560263633728, | |
| "learning_rate": 0.0005292, | |
| "loss": 1.4942, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 0.7152, | |
| "grad_norm": 1.5753790140151978, | |
| "learning_rate": 0.0005304, | |
| "loss": 1.9359, | |
| "step": 447 | |
| }, | |
| { | |
| "epoch": 0.7168, | |
| "grad_norm": 2.2859463691711426, | |
| "learning_rate": 0.0005315999999999999, | |
| "loss": 1.745, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 0.7184, | |
| "grad_norm": 2.0483174324035645, | |
| "learning_rate": 0.0005327999999999999, | |
| "loss": 1.8343, | |
| "step": 449 | |
| }, | |
| { | |
| "epoch": 0.72, | |
| "grad_norm": 3.6337218284606934, | |
| "learning_rate": 0.000534, | |
| "loss": 2.2459, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.7216, | |
| "grad_norm": 13.555660247802734, | |
| "learning_rate": 0.0005352, | |
| "loss": 3.3534, | |
| "step": 451 | |
| }, | |
| { | |
| "epoch": 0.7232, | |
| "grad_norm": 6.6364850997924805, | |
| "learning_rate": 0.0005363999999999999, | |
| "loss": 2.7515, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 0.7248, | |
| "grad_norm": 3.817852258682251, | |
| "learning_rate": 0.0005376, | |
| "loss": 2.012, | |
| "step": 453 | |
| }, | |
| { | |
| "epoch": 0.7264, | |
| "grad_norm": 1.707593560218811, | |
| "learning_rate": 0.0005388, | |
| "loss": 1.9942, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 0.728, | |
| "grad_norm": 2.77917218208313, | |
| "learning_rate": 0.00054, | |
| "loss": 1.6863, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 0.7296, | |
| "grad_norm": 2.7656164169311523, | |
| "learning_rate": 0.0005411999999999999, | |
| "loss": 1.4779, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 0.7312, | |
| "grad_norm": 1.720285177230835, | |
| "learning_rate": 0.0005424, | |
| "loss": 1.8024, | |
| "step": 457 | |
| }, | |
| { | |
| "epoch": 0.7328, | |
| "grad_norm": 3.847505807876587, | |
| "learning_rate": 0.0005436, | |
| "loss": 1.6034, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 0.7344, | |
| "grad_norm": 2.7850637435913086, | |
| "learning_rate": 0.0005448, | |
| "loss": 1.3834, | |
| "step": 459 | |
| }, | |
| { | |
| "epoch": 0.736, | |
| "grad_norm": 1.2482507228851318, | |
| "learning_rate": 0.0005459999999999999, | |
| "loss": 1.6495, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.7376, | |
| "grad_norm": 1.2714293003082275, | |
| "learning_rate": 0.0005472, | |
| "loss": 1.3752, | |
| "step": 461 | |
| }, | |
| { | |
| "epoch": 0.7392, | |
| "grad_norm": 1.8939746618270874, | |
| "learning_rate": 0.0005484, | |
| "loss": 1.4186, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 0.7408, | |
| "grad_norm": 2.5669922828674316, | |
| "learning_rate": 0.0005496, | |
| "loss": 1.2681, | |
| "step": 463 | |
| }, | |
| { | |
| "epoch": 0.7424, | |
| "grad_norm": 1.9449177980422974, | |
| "learning_rate": 0.0005507999999999999, | |
| "loss": 1.3308, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 0.744, | |
| "grad_norm": 0.872009813785553, | |
| "learning_rate": 0.000552, | |
| "loss": 1.2645, | |
| "step": 465 | |
| }, | |
| { | |
| "epoch": 0.7456, | |
| "grad_norm": 0.8539568185806274, | |
| "learning_rate": 0.0005532, | |
| "loss": 1.0258, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 0.7472, | |
| "grad_norm": 1.0018901824951172, | |
| "learning_rate": 0.0005544, | |
| "loss": 1.38, | |
| "step": 467 | |
| }, | |
| { | |
| "epoch": 0.7488, | |
| "grad_norm": 0.9926770329475403, | |
| "learning_rate": 0.0005556, | |
| "loss": 1.3214, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 0.7504, | |
| "grad_norm": 1.80833101272583, | |
| "learning_rate": 0.0005568, | |
| "loss": 1.3013, | |
| "step": 469 | |
| }, | |
| { | |
| "epoch": 0.752, | |
| "grad_norm": 1.4460201263427734, | |
| "learning_rate": 0.000558, | |
| "loss": 1.3652, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.7536, | |
| "grad_norm": 1.0537903308868408, | |
| "learning_rate": 0.0005591999999999999, | |
| "loss": 1.1968, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 0.7552, | |
| "grad_norm": 1.0886517763137817, | |
| "learning_rate": 0.0005604, | |
| "loss": 1.2747, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 0.7568, | |
| "grad_norm": 1.2867629528045654, | |
| "learning_rate": 0.0005616, | |
| "loss": 1.2078, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 0.7584, | |
| "grad_norm": 1.7365913391113281, | |
| "learning_rate": 0.0005627999999999999, | |
| "loss": 1.0753, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 0.76, | |
| "grad_norm": 1.7022826671600342, | |
| "learning_rate": 0.0005639999999999999, | |
| "loss": 1.2664, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 0.7616, | |
| "grad_norm": 1.3423399925231934, | |
| "learning_rate": 0.0005652, | |
| "loss": 1.3553, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 0.7632, | |
| "grad_norm": 1.3367669582366943, | |
| "learning_rate": 0.0005663999999999999, | |
| "loss": 1.4162, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 0.7648, | |
| "grad_norm": 0.9925369024276733, | |
| "learning_rate": 0.0005675999999999999, | |
| "loss": 1.2366, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 0.7664, | |
| "grad_norm": 1.741668939590454, | |
| "learning_rate": 0.0005688, | |
| "loss": 1.2624, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 0.768, | |
| "grad_norm": 1.771984577178955, | |
| "learning_rate": 0.00057, | |
| "loss": 1.0515, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.7696, | |
| "grad_norm": 1.5952296257019043, | |
| "learning_rate": 0.0005711999999999999, | |
| "loss": 1.2872, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 0.7712, | |
| "grad_norm": 1.2628203630447388, | |
| "learning_rate": 0.0005723999999999999, | |
| "loss": 1.0806, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 0.7728, | |
| "grad_norm": 1.4792985916137695, | |
| "learning_rate": 0.0005736, | |
| "loss": 1.962, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 0.7744, | |
| "grad_norm": 2.177412509918213, | |
| "learning_rate": 0.0005747999999999999, | |
| "loss": 1.4523, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 0.776, | |
| "grad_norm": 1.865964412689209, | |
| "learning_rate": 0.0005759999999999999, | |
| "loss": 1.401, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 0.7776, | |
| "grad_norm": 1.4428671598434448, | |
| "learning_rate": 0.0005771999999999999, | |
| "loss": 1.3322, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 0.7792, | |
| "grad_norm": 0.9237609505653381, | |
| "learning_rate": 0.0005784, | |
| "loss": 1.4704, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 0.7808, | |
| "grad_norm": 2.0012426376342773, | |
| "learning_rate": 0.0005795999999999999, | |
| "loss": 1.6074, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 0.7824, | |
| "grad_norm": 1.1296131610870361, | |
| "learning_rate": 0.0005807999999999999, | |
| "loss": 1.4989, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 0.784, | |
| "grad_norm": 1.091180682182312, | |
| "learning_rate": 0.0005819999999999999, | |
| "loss": 1.4049, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.7856, | |
| "grad_norm": 1.2416259050369263, | |
| "learning_rate": 0.0005832, | |
| "loss": 1.8097, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 0.7872, | |
| "grad_norm": 0.9992501139640808, | |
| "learning_rate": 0.0005843999999999999, | |
| "loss": 1.5731, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 0.7888, | |
| "grad_norm": 1.1882905960083008, | |
| "learning_rate": 0.0005855999999999999, | |
| "loss": 1.2349, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 0.7904, | |
| "grad_norm": 1.3324135541915894, | |
| "learning_rate": 0.0005868, | |
| "loss": 1.749, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 0.792, | |
| "grad_norm": 1.3113425970077515, | |
| "learning_rate": 0.000588, | |
| "loss": 1.3155, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 0.7936, | |
| "grad_norm": 1.333341121673584, | |
| "learning_rate": 0.0005891999999999999, | |
| "loss": 1.0152, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 0.7952, | |
| "grad_norm": 1.886502742767334, | |
| "learning_rate": 0.0005903999999999999, | |
| "loss": 1.5642, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 0.7968, | |
| "grad_norm": 2.012117385864258, | |
| "learning_rate": 0.0005916, | |
| "loss": 1.6069, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 0.7984, | |
| "grad_norm": 2.344853401184082, | |
| "learning_rate": 0.0005928, | |
| "loss": 2.0689, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 0.8, | |
| "grad_norm": 2.7430222034454346, | |
| "learning_rate": 0.0005939999999999999, | |
| "loss": 2.81, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.8016, | |
| "grad_norm": 2.373655319213867, | |
| "learning_rate": 0.0005951999999999999, | |
| "loss": 1.8218, | |
| "step": 501 | |
| }, | |
| { | |
| "epoch": 0.8032, | |
| "grad_norm": 1.2365477085113525, | |
| "learning_rate": 0.0005964, | |
| "loss": 1.343, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 0.8048, | |
| "grad_norm": 2.100356101989746, | |
| "learning_rate": 0.0005976, | |
| "loss": 2.0204, | |
| "step": 503 | |
| }, | |
| { | |
| "epoch": 0.8064, | |
| "grad_norm": 1.6222838163375854, | |
| "learning_rate": 0.0005987999999999999, | |
| "loss": 1.8366, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 0.808, | |
| "grad_norm": 3.295870542526245, | |
| "learning_rate": 0.0006, | |
| "loss": 1.6686, | |
| "step": 505 | |
| }, | |
| { | |
| "epoch": 0.8096, | |
| "grad_norm": 3.5636391639709473, | |
| "learning_rate": 0.0005987999999999999, | |
| "loss": 1.958, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 0.8112, | |
| "grad_norm": 4.600498199462891, | |
| "learning_rate": 0.0005976, | |
| "loss": 1.4633, | |
| "step": 507 | |
| }, | |
| { | |
| "epoch": 0.8128, | |
| "grad_norm": 3.999089241027832, | |
| "learning_rate": 0.0005964, | |
| "loss": 1.591, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 0.8144, | |
| "grad_norm": 2.074601173400879, | |
| "learning_rate": 0.0005951999999999999, | |
| "loss": 1.4589, | |
| "step": 509 | |
| }, | |
| { | |
| "epoch": 0.816, | |
| "grad_norm": 1.2597025632858276, | |
| "learning_rate": 0.0005939999999999999, | |
| "loss": 1.517, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.8176, | |
| "grad_norm": 1.491461157798767, | |
| "learning_rate": 0.0005928, | |
| "loss": 1.1305, | |
| "step": 511 | |
| }, | |
| { | |
| "epoch": 0.8192, | |
| "grad_norm": 2.2012178897857666, | |
| "learning_rate": 0.0005916, | |
| "loss": 1.2055, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 0.8208, | |
| "grad_norm": 2.303264617919922, | |
| "learning_rate": 0.0005903999999999999, | |
| "loss": 1.4843, | |
| "step": 513 | |
| }, | |
| { | |
| "epoch": 0.8224, | |
| "grad_norm": 1.3678765296936035, | |
| "learning_rate": 0.0005891999999999999, | |
| "loss": 1.1635, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 0.824, | |
| "grad_norm": 1.7093764543533325, | |
| "learning_rate": 0.000588, | |
| "loss": 1.1574, | |
| "step": 515 | |
| }, | |
| { | |
| "epoch": 0.8256, | |
| "grad_norm": 1.2002806663513184, | |
| "learning_rate": 0.0005868, | |
| "loss": 1.1366, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 0.8272, | |
| "grad_norm": 1.1055371761322021, | |
| "learning_rate": 0.0005855999999999999, | |
| "loss": 1.1778, | |
| "step": 517 | |
| }, | |
| { | |
| "epoch": 0.8288, | |
| "grad_norm": 0.9321176409721375, | |
| "learning_rate": 0.0005843999999999999, | |
| "loss": 0.8233, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 0.8304, | |
| "grad_norm": 1.3442676067352295, | |
| "learning_rate": 0.0005832, | |
| "loss": 1.2463, | |
| "step": 519 | |
| }, | |
| { | |
| "epoch": 0.832, | |
| "grad_norm": 0.9121391177177429, | |
| "learning_rate": 0.0005819999999999999, | |
| "loss": 1.3646, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.8336, | |
| "grad_norm": 1.1537225246429443, | |
| "learning_rate": 0.0005807999999999999, | |
| "loss": 1.4225, | |
| "step": 521 | |
| }, | |
| { | |
| "epoch": 0.8352, | |
| "grad_norm": 1.0641944408416748, | |
| "learning_rate": 0.0005795999999999999, | |
| "loss": 1.0161, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 0.8368, | |
| "grad_norm": 0.7178429961204529, | |
| "learning_rate": 0.0005784, | |
| "loss": 1.1851, | |
| "step": 523 | |
| }, | |
| { | |
| "epoch": 0.8384, | |
| "grad_norm": 1.4418388605117798, | |
| "learning_rate": 0.0005771999999999999, | |
| "loss": 1.8398, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 0.84, | |
| "grad_norm": 1.382843255996704, | |
| "learning_rate": 0.0005759999999999999, | |
| "loss": 1.4898, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 0.8416, | |
| "grad_norm": 1.0729074478149414, | |
| "learning_rate": 0.0005747999999999999, | |
| "loss": 1.2898, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 0.8432, | |
| "grad_norm": 0.9983257055282593, | |
| "learning_rate": 0.0005736, | |
| "loss": 1.1974, | |
| "step": 527 | |
| }, | |
| { | |
| "epoch": 0.8448, | |
| "grad_norm": 1.1875462532043457, | |
| "learning_rate": 0.0005723999999999999, | |
| "loss": 1.1169, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 0.8464, | |
| "grad_norm": 1.318334698677063, | |
| "learning_rate": 0.0005711999999999999, | |
| "loss": 1.1376, | |
| "step": 529 | |
| }, | |
| { | |
| "epoch": 0.848, | |
| "grad_norm": 1.537840485572815, | |
| "learning_rate": 0.00057, | |
| "loss": 1.2072, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.8496, | |
| "grad_norm": 1.4589056968688965, | |
| "learning_rate": 0.0005688, | |
| "loss": 1.373, | |
| "step": 531 | |
| }, | |
| { | |
| "epoch": 0.8512, | |
| "grad_norm": 1.019971251487732, | |
| "learning_rate": 0.0005675999999999999, | |
| "loss": 1.0613, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 0.8528, | |
| "grad_norm": 1.0122156143188477, | |
| "learning_rate": 0.0005663999999999999, | |
| "loss": 0.9874, | |
| "step": 533 | |
| }, | |
| { | |
| "epoch": 0.8544, | |
| "grad_norm": 1.1434595584869385, | |
| "learning_rate": 0.0005652, | |
| "loss": 1.2304, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 0.856, | |
| "grad_norm": 1.2336163520812988, | |
| "learning_rate": 0.0005639999999999999, | |
| "loss": 1.6633, | |
| "step": 535 | |
| }, | |
| { | |
| "epoch": 0.8576, | |
| "grad_norm": 1.306872844696045, | |
| "learning_rate": 0.0005627999999999999, | |
| "loss": 1.7359, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 0.8592, | |
| "grad_norm": 1.2644526958465576, | |
| "learning_rate": 0.0005616, | |
| "loss": 1.2328, | |
| "step": 537 | |
| }, | |
| { | |
| "epoch": 0.8608, | |
| "grad_norm": 1.262831449508667, | |
| "learning_rate": 0.0005604, | |
| "loss": 1.0457, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 0.8624, | |
| "grad_norm": 0.9390996098518372, | |
| "learning_rate": 0.0005591999999999999, | |
| "loss": 1.2251, | |
| "step": 539 | |
| }, | |
| { | |
| "epoch": 0.864, | |
| "grad_norm": 1.208268165588379, | |
| "learning_rate": 0.000558, | |
| "loss": 1.3572, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.8656, | |
| "grad_norm": 1.0636659860610962, | |
| "learning_rate": 0.0005568, | |
| "loss": 1.1701, | |
| "step": 541 | |
| }, | |
| { | |
| "epoch": 0.8672, | |
| "grad_norm": 0.9668271541595459, | |
| "learning_rate": 0.0005556, | |
| "loss": 1.5845, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 0.8688, | |
| "grad_norm": 0.9891708493232727, | |
| "learning_rate": 0.0005544, | |
| "loss": 1.3511, | |
| "step": 543 | |
| }, | |
| { | |
| "epoch": 0.8704, | |
| "grad_norm": 1.7175298929214478, | |
| "learning_rate": 0.0005532, | |
| "loss": 1.2839, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 0.872, | |
| "grad_norm": 0.9767908453941345, | |
| "learning_rate": 0.000552, | |
| "loss": 1.5042, | |
| "step": 545 | |
| }, | |
| { | |
| "epoch": 0.8736, | |
| "grad_norm": 1.3343541622161865, | |
| "learning_rate": 0.0005507999999999999, | |
| "loss": 1.6681, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 0.8752, | |
| "grad_norm": 1.3749518394470215, | |
| "learning_rate": 0.0005496, | |
| "loss": 1.9624, | |
| "step": 547 | |
| }, | |
| { | |
| "epoch": 0.8768, | |
| "grad_norm": 2.514359712600708, | |
| "learning_rate": 0.0005484, | |
| "loss": 1.895, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 0.8784, | |
| "grad_norm": 1.514849066734314, | |
| "learning_rate": 0.0005472, | |
| "loss": 1.7603, | |
| "step": 549 | |
| }, | |
| { | |
| "epoch": 0.88, | |
| "grad_norm": 1.4065580368041992, | |
| "learning_rate": 0.0005459999999999999, | |
| "loss": 1.9368, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.8816, | |
| "grad_norm": 4.109616756439209, | |
| "learning_rate": 0.0005448, | |
| "loss": 1.9927, | |
| "step": 551 | |
| }, | |
| { | |
| "epoch": 0.8832, | |
| "grad_norm": 5.660764694213867, | |
| "learning_rate": 0.0005436, | |
| "loss": 2.4962, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 0.8848, | |
| "grad_norm": 2.027193307876587, | |
| "learning_rate": 0.0005424, | |
| "loss": 1.6865, | |
| "step": 553 | |
| }, | |
| { | |
| "epoch": 0.8864, | |
| "grad_norm": 4.918600559234619, | |
| "learning_rate": 0.0005411999999999999, | |
| "loss": 2.1457, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 0.888, | |
| "grad_norm": 2.522416114807129, | |
| "learning_rate": 0.00054, | |
| "loss": 1.5093, | |
| "step": 555 | |
| }, | |
| { | |
| "epoch": 0.8896, | |
| "grad_norm": 1.574242353439331, | |
| "learning_rate": 0.0005388, | |
| "loss": 1.1618, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 0.8912, | |
| "grad_norm": 2.081839084625244, | |
| "learning_rate": 0.0005376, | |
| "loss": 1.2351, | |
| "step": 557 | |
| }, | |
| { | |
| "epoch": 0.8928, | |
| "grad_norm": 2.582669734954834, | |
| "learning_rate": 0.0005363999999999999, | |
| "loss": 1.6881, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 0.8944, | |
| "grad_norm": 2.058865785598755, | |
| "learning_rate": 0.0005352, | |
| "loss": 1.3369, | |
| "step": 559 | |
| }, | |
| { | |
| "epoch": 0.896, | |
| "grad_norm": 5.098209381103516, | |
| "learning_rate": 0.000534, | |
| "loss": 1.7725, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.8976, | |
| "grad_norm": 4.526047229766846, | |
| "learning_rate": 0.0005327999999999999, | |
| "loss": 1.9443, | |
| "step": 561 | |
| }, | |
| { | |
| "epoch": 0.8992, | |
| "grad_norm": 3.9119491577148438, | |
| "learning_rate": 0.0005315999999999999, | |
| "loss": 1.5399, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 0.9008, | |
| "grad_norm": 0.8436188101768494, | |
| "learning_rate": 0.0005304, | |
| "loss": 1.1211, | |
| "step": 563 | |
| }, | |
| { | |
| "epoch": 0.9024, | |
| "grad_norm": 0.8627029061317444, | |
| "learning_rate": 0.0005292, | |
| "loss": 1.2774, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 0.904, | |
| "grad_norm": 1.1870328187942505, | |
| "learning_rate": 0.0005279999999999999, | |
| "loss": 1.2342, | |
| "step": 565 | |
| }, | |
| { | |
| "epoch": 0.9056, | |
| "grad_norm": 1.0261473655700684, | |
| "learning_rate": 0.0005267999999999999, | |
| "loss": 1.0405, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 0.9072, | |
| "grad_norm": 0.6334408521652222, | |
| "learning_rate": 0.0005256, | |
| "loss": 1.0547, | |
| "step": 567 | |
| }, | |
| { | |
| "epoch": 0.9088, | |
| "grad_norm": 0.7928243279457092, | |
| "learning_rate": 0.0005244, | |
| "loss": 0.9138, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 0.9104, | |
| "grad_norm": 0.9043545126914978, | |
| "learning_rate": 0.0005231999999999999, | |
| "loss": 1.0988, | |
| "step": 569 | |
| }, | |
| { | |
| "epoch": 0.912, | |
| "grad_norm": 0.9109718203544617, | |
| "learning_rate": 0.000522, | |
| "loss": 1.0209, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.9136, | |
| "grad_norm": 0.8105588555335999, | |
| "learning_rate": 0.0005208, | |
| "loss": 1.2233, | |
| "step": 571 | |
| }, | |
| { | |
| "epoch": 0.9152, | |
| "grad_norm": 2.0293259620666504, | |
| "learning_rate": 0.0005195999999999999, | |
| "loss": 1.4025, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 0.9168, | |
| "grad_norm": 1.1730594635009766, | |
| "learning_rate": 0.0005183999999999999, | |
| "loss": 1.481, | |
| "step": 573 | |
| }, | |
| { | |
| "epoch": 0.9184, | |
| "grad_norm": 0.8027293086051941, | |
| "learning_rate": 0.0005172, | |
| "loss": 1.1333, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "grad_norm": 0.8858001828193665, | |
| "learning_rate": 0.000516, | |
| "loss": 1.4056, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 0.9216, | |
| "grad_norm": 1.3030261993408203, | |
| "learning_rate": 0.0005147999999999999, | |
| "loss": 1.212, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 0.9232, | |
| "grad_norm": 0.9441176652908325, | |
| "learning_rate": 0.0005135999999999999, | |
| "loss": 1.6681, | |
| "step": 577 | |
| }, | |
| { | |
| "epoch": 0.9248, | |
| "grad_norm": 0.9312158226966858, | |
| "learning_rate": 0.0005124, | |
| "loss": 1.2352, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 0.9264, | |
| "grad_norm": 1.1646393537521362, | |
| "learning_rate": 0.0005112, | |
| "loss": 1.1658, | |
| "step": 579 | |
| }, | |
| { | |
| "epoch": 0.928, | |
| "grad_norm": 1.2840147018432617, | |
| "learning_rate": 0.0005099999999999999, | |
| "loss": 1.0198, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.9296, | |
| "grad_norm": 1.2597519159317017, | |
| "learning_rate": 0.0005087999999999999, | |
| "loss": 1.0831, | |
| "step": 581 | |
| }, | |
| { | |
| "epoch": 0.9312, | |
| "grad_norm": 0.7873828411102295, | |
| "learning_rate": 0.0005076, | |
| "loss": 1.0068, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 0.9328, | |
| "grad_norm": 1.5674644708633423, | |
| "learning_rate": 0.0005064, | |
| "loss": 1.3783, | |
| "step": 583 | |
| }, | |
| { | |
| "epoch": 0.9344, | |
| "grad_norm": 0.9917027354240417, | |
| "learning_rate": 0.0005051999999999999, | |
| "loss": 0.9963, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 0.936, | |
| "grad_norm": 0.9538294672966003, | |
| "learning_rate": 0.0005039999999999999, | |
| "loss": 0.9023, | |
| "step": 585 | |
| }, | |
| { | |
| "epoch": 0.9376, | |
| "grad_norm": 1.1332108974456787, | |
| "learning_rate": 0.0005028, | |
| "loss": 1.0699, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 0.9392, | |
| "grad_norm": 1.0121673345565796, | |
| "learning_rate": 0.0005015999999999999, | |
| "loss": 1.0387, | |
| "step": 587 | |
| }, | |
| { | |
| "epoch": 0.9408, | |
| "grad_norm": 1.13718581199646, | |
| "learning_rate": 0.0005003999999999999, | |
| "loss": 1.1858, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 0.9424, | |
| "grad_norm": 1.2693606615066528, | |
| "learning_rate": 0.0004991999999999999, | |
| "loss": 1.0405, | |
| "step": 589 | |
| }, | |
| { | |
| "epoch": 0.944, | |
| "grad_norm": 1.4965566396713257, | |
| "learning_rate": 0.000498, | |
| "loss": 1.326, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.9456, | |
| "grad_norm": 1.2708925008773804, | |
| "learning_rate": 0.0004967999999999999, | |
| "loss": 1.2337, | |
| "step": 591 | |
| }, | |
| { | |
| "epoch": 0.9472, | |
| "grad_norm": 2.260007858276367, | |
| "learning_rate": 0.0004955999999999999, | |
| "loss": 1.5223, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 0.9488, | |
| "grad_norm": 1.3588523864746094, | |
| "learning_rate": 0.0004944, | |
| "loss": 1.4486, | |
| "step": 593 | |
| }, | |
| { | |
| "epoch": 0.9504, | |
| "grad_norm": 3.3943569660186768, | |
| "learning_rate": 0.0004932, | |
| "loss": 1.5663, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 0.952, | |
| "grad_norm": 1.5709065198898315, | |
| "learning_rate": 0.0004919999999999999, | |
| "loss": 1.3337, | |
| "step": 595 | |
| }, | |
| { | |
| "epoch": 0.9536, | |
| "grad_norm": 1.6011497974395752, | |
| "learning_rate": 0.0004907999999999999, | |
| "loss": 1.4023, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 0.9552, | |
| "grad_norm": 1.2253276109695435, | |
| "learning_rate": 0.0004896, | |
| "loss": 1.0934, | |
| "step": 597 | |
| }, | |
| { | |
| "epoch": 0.9568, | |
| "grad_norm": 9.454032897949219, | |
| "learning_rate": 0.0004883999999999999, | |
| "loss": 2.9201, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 0.9584, | |
| "grad_norm": 1.7915419340133667, | |
| "learning_rate": 0.00048719999999999997, | |
| "loss": 1.5261, | |
| "step": 599 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "grad_norm": null, | |
| "learning_rate": 0.00048719999999999997, | |
| "loss": 1.3199, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.9616, | |
| "grad_norm": 18.144990921020508, | |
| "learning_rate": 0.000486, | |
| "loss": 4.1466, | |
| "step": 601 | |
| }, | |
| { | |
| "epoch": 0.9632, | |
| "grad_norm": 1.2664769887924194, | |
| "learning_rate": 0.00048479999999999997, | |
| "loss": 1.6238, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 0.9648, | |
| "grad_norm": 2.453564405441284, | |
| "learning_rate": 0.0004836, | |
| "loss": 1.5469, | |
| "step": 603 | |
| }, | |
| { | |
| "epoch": 0.9664, | |
| "grad_norm": 2.737936496734619, | |
| "learning_rate": 0.00048239999999999996, | |
| "loss": 1.3178, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 0.968, | |
| "grad_norm": 2.828806161880493, | |
| "learning_rate": 0.0004812, | |
| "loss": 1.4034, | |
| "step": 605 | |
| }, | |
| { | |
| "epoch": 0.9696, | |
| "grad_norm": 1.4219012260437012, | |
| "learning_rate": 0.00047999999999999996, | |
| "loss": 1.0615, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 0.9712, | |
| "grad_norm": 2.025907039642334, | |
| "learning_rate": 0.0004788, | |
| "loss": 1.19, | |
| "step": 607 | |
| }, | |
| { | |
| "epoch": 0.9728, | |
| "grad_norm": 0.9138876795768738, | |
| "learning_rate": 0.0004776, | |
| "loss": 1.1638, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 0.9744, | |
| "grad_norm": 0.7301196455955505, | |
| "learning_rate": 0.0004764, | |
| "loss": 1.2678, | |
| "step": 609 | |
| }, | |
| { | |
| "epoch": 0.976, | |
| "grad_norm": 1.6543656587600708, | |
| "learning_rate": 0.0004752, | |
| "loss": 1.1815, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.9776, | |
| "grad_norm": 1.8122645616531372, | |
| "learning_rate": 0.000474, | |
| "loss": 1.0917, | |
| "step": 611 | |
| }, | |
| { | |
| "epoch": 0.9792, | |
| "grad_norm": 1.609754204750061, | |
| "learning_rate": 0.0004728, | |
| "loss": 1.1377, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 0.9808, | |
| "grad_norm": 1.2491132020950317, | |
| "learning_rate": 0.00047159999999999997, | |
| "loss": 0.9748, | |
| "step": 613 | |
| }, | |
| { | |
| "epoch": 0.9824, | |
| "grad_norm": 1.6153700351715088, | |
| "learning_rate": 0.0004704, | |
| "loss": 1.1825, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 0.984, | |
| "grad_norm": 2.4447743892669678, | |
| "learning_rate": 0.00046919999999999997, | |
| "loss": 1.1472, | |
| "step": 615 | |
| }, | |
| { | |
| "epoch": 0.9856, | |
| "grad_norm": 0.93949294090271, | |
| "learning_rate": 0.000468, | |
| "loss": 0.9327, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 0.9872, | |
| "grad_norm": 1.0353221893310547, | |
| "learning_rate": 0.00046679999999999996, | |
| "loss": 1.3792, | |
| "step": 617 | |
| }, | |
| { | |
| "epoch": 0.9888, | |
| "grad_norm": 1.5396970510482788, | |
| "learning_rate": 0.0004656, | |
| "loss": 1.4076, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 0.9904, | |
| "grad_norm": 2.1144979000091553, | |
| "learning_rate": 0.00046439999999999996, | |
| "loss": 1.2042, | |
| "step": 619 | |
| }, | |
| { | |
| "epoch": 0.992, | |
| "grad_norm": 1.649453043937683, | |
| "learning_rate": 0.0004632, | |
| "loss": 1.033, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.9936, | |
| "grad_norm": 1.555721640586853, | |
| "learning_rate": 0.00046199999999999995, | |
| "loss": 1.4478, | |
| "step": 621 | |
| }, | |
| { | |
| "epoch": 0.9952, | |
| "grad_norm": 2.0463335514068604, | |
| "learning_rate": 0.0004608, | |
| "loss": 1.427, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 0.9968, | |
| "grad_norm": 1.6396963596343994, | |
| "learning_rate": 0.00045959999999999995, | |
| "loss": 1.4215, | |
| "step": 623 | |
| }, | |
| { | |
| "epoch": 0.9984, | |
| "grad_norm": 1.0603671073913574, | |
| "learning_rate": 0.0004584, | |
| "loss": 1.2777, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 2.150242328643799, | |
| "learning_rate": 0.00045719999999999995, | |
| "loss": 1.5985, | |
| "step": 625 | |
| }, | |
| { | |
| "epoch": 1.0016, | |
| "grad_norm": 13.092059135437012, | |
| "learning_rate": 0.00045599999999999997, | |
| "loss": 3.3297, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 1.0032, | |
| "grad_norm": 5.357777118682861, | |
| "learning_rate": 0.00045479999999999994, | |
| "loss": 1.8506, | |
| "step": 627 | |
| }, | |
| { | |
| "epoch": 1.0048, | |
| "grad_norm": 3.857879400253296, | |
| "learning_rate": 0.00045359999999999997, | |
| "loss": 1.4082, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 1.0064, | |
| "grad_norm": 5.025846004486084, | |
| "learning_rate": 0.00045239999999999994, | |
| "loss": 2.445, | |
| "step": 629 | |
| }, | |
| { | |
| "epoch": 1.008, | |
| "grad_norm": 3.71512508392334, | |
| "learning_rate": 0.00045119999999999996, | |
| "loss": 2.0449, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 1.0096, | |
| "grad_norm": 4.47951078414917, | |
| "learning_rate": 0.00045, | |
| "loss": 1.9927, | |
| "step": 631 | |
| }, | |
| { | |
| "epoch": 1.0112, | |
| "grad_norm": 2.713778257369995, | |
| "learning_rate": 0.00044879999999999996, | |
| "loss": 1.4958, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 1.0128, | |
| "grad_norm": 2.6333401203155518, | |
| "learning_rate": 0.0004476, | |
| "loss": 1.3756, | |
| "step": 633 | |
| }, | |
| { | |
| "epoch": 1.0144, | |
| "grad_norm": 3.1156418323516846, | |
| "learning_rate": 0.00044639999999999995, | |
| "loss": 1.6268, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 1.016, | |
| "grad_norm": 2.78902268409729, | |
| "learning_rate": 0.0004452, | |
| "loss": 1.8211, | |
| "step": 635 | |
| }, | |
| { | |
| "epoch": 1.0176, | |
| "grad_norm": 3.0045857429504395, | |
| "learning_rate": 0.00044399999999999995, | |
| "loss": 1.1757, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 1.0192, | |
| "grad_norm": 1.4704291820526123, | |
| "learning_rate": 0.0004428, | |
| "loss": 1.2811, | |
| "step": 637 | |
| }, | |
| { | |
| "epoch": 1.0208, | |
| "grad_norm": 1.3432084321975708, | |
| "learning_rate": 0.00044159999999999995, | |
| "loss": 1.0086, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 1.0224, | |
| "grad_norm": 0.842569887638092, | |
| "learning_rate": 0.00044039999999999997, | |
| "loss": 1.3255, | |
| "step": 639 | |
| }, | |
| { | |
| "epoch": 1.024, | |
| "grad_norm": 0.8691660761833191, | |
| "learning_rate": 0.00043919999999999994, | |
| "loss": 1.1076, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 1.0256, | |
| "grad_norm": 1.083778738975525, | |
| "learning_rate": 0.00043799999999999997, | |
| "loss": 1.0929, | |
| "step": 641 | |
| }, | |
| { | |
| "epoch": 1.0272, | |
| "grad_norm": 1.1408025026321411, | |
| "learning_rate": 0.00043679999999999994, | |
| "loss": 1.2497, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 1.0288, | |
| "grad_norm": 0.8224440217018127, | |
| "learning_rate": 0.00043559999999999996, | |
| "loss": 1.2853, | |
| "step": 643 | |
| }, | |
| { | |
| "epoch": 1.0304, | |
| "grad_norm": 0.7420323491096497, | |
| "learning_rate": 0.00043439999999999993, | |
| "loss": 1.187, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 1.032, | |
| "grad_norm": 0.7818359732627869, | |
| "learning_rate": 0.00043319999999999996, | |
| "loss": 1.0613, | |
| "step": 645 | |
| }, | |
| { | |
| "epoch": 1.0336, | |
| "grad_norm": 1.2085120677947998, | |
| "learning_rate": 0.00043199999999999993, | |
| "loss": 1.0593, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 1.0352, | |
| "grad_norm": 0.6779820322990417, | |
| "learning_rate": 0.00043079999999999995, | |
| "loss": 0.9878, | |
| "step": 647 | |
| }, | |
| { | |
| "epoch": 1.0368, | |
| "grad_norm": 1.1257340908050537, | |
| "learning_rate": 0.0004295999999999999, | |
| "loss": 1.0076, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 1.0384, | |
| "grad_norm": 0.6911525726318359, | |
| "learning_rate": 0.00042839999999999995, | |
| "loss": 1.1204, | |
| "step": 649 | |
| }, | |
| { | |
| "epoch": 1.04, | |
| "grad_norm": 1.0192064046859741, | |
| "learning_rate": 0.0004271999999999999, | |
| "loss": 1.217, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.0416, | |
| "grad_norm": 0.8528116345405579, | |
| "learning_rate": 0.00042599999999999995, | |
| "loss": 1.1124, | |
| "step": 651 | |
| }, | |
| { | |
| "epoch": 1.0432, | |
| "grad_norm": 0.8429757356643677, | |
| "learning_rate": 0.0004247999999999999, | |
| "loss": 1.0387, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 1.0448, | |
| "grad_norm": 1.0031050443649292, | |
| "learning_rate": 0.00042359999999999994, | |
| "loss": 1.1567, | |
| "step": 653 | |
| }, | |
| { | |
| "epoch": 1.0464, | |
| "grad_norm": 1.1668227910995483, | |
| "learning_rate": 0.0004223999999999999, | |
| "loss": 1.3264, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 1.048, | |
| "grad_norm": 0.8505134582519531, | |
| "learning_rate": 0.00042119999999999994, | |
| "loss": 0.8013, | |
| "step": 655 | |
| }, | |
| { | |
| "epoch": 1.0496, | |
| "grad_norm": 0.7736939191818237, | |
| "learning_rate": 0.00041999999999999996, | |
| "loss": 1.0249, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 1.0512, | |
| "grad_norm": 1.082900047302246, | |
| "learning_rate": 0.00041879999999999993, | |
| "loss": 1.0062, | |
| "step": 657 | |
| }, | |
| { | |
| "epoch": 1.0528, | |
| "grad_norm": 1.9835671186447144, | |
| "learning_rate": 0.00041759999999999996, | |
| "loss": 1.4081, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 1.0544, | |
| "grad_norm": 0.9174219965934753, | |
| "learning_rate": 0.00041639999999999993, | |
| "loss": 0.8602, | |
| "step": 659 | |
| }, | |
| { | |
| "epoch": 1.056, | |
| "grad_norm": 0.8988387584686279, | |
| "learning_rate": 0.00041519999999999995, | |
| "loss": 1.0813, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.0576, | |
| "grad_norm": 0.9161027669906616, | |
| "learning_rate": 0.0004139999999999999, | |
| "loss": 0.9137, | |
| "step": 661 | |
| }, | |
| { | |
| "epoch": 1.0592, | |
| "grad_norm": 1.0347057580947876, | |
| "learning_rate": 0.00041279999999999995, | |
| "loss": 0.9526, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 1.0608, | |
| "grad_norm": 0.9697722792625427, | |
| "learning_rate": 0.0004116, | |
| "loss": 1.2357, | |
| "step": 663 | |
| }, | |
| { | |
| "epoch": 1.0624, | |
| "grad_norm": 1.1263432502746582, | |
| "learning_rate": 0.0004104, | |
| "loss": 1.2324, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 1.064, | |
| "grad_norm": 1.4703420400619507, | |
| "learning_rate": 0.00040919999999999997, | |
| "loss": 1.2418, | |
| "step": 665 | |
| }, | |
| { | |
| "epoch": 1.0656, | |
| "grad_norm": 1.2557834386825562, | |
| "learning_rate": 0.000408, | |
| "loss": 1.5181, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 1.0672, | |
| "grad_norm": 1.1496392488479614, | |
| "learning_rate": 0.00040679999999999997, | |
| "loss": 1.5552, | |
| "step": 667 | |
| }, | |
| { | |
| "epoch": 1.0688, | |
| "grad_norm": 1.0062506198883057, | |
| "learning_rate": 0.0004056, | |
| "loss": 1.3242, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 1.0704, | |
| "grad_norm": 0.8978244066238403, | |
| "learning_rate": 0.0004044, | |
| "loss": 1.3264, | |
| "step": 669 | |
| }, | |
| { | |
| "epoch": 1.072, | |
| "grad_norm": 1.2901413440704346, | |
| "learning_rate": 0.0004032, | |
| "loss": 1.3077, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.0735999999999999, | |
| "grad_norm": 1.202578067779541, | |
| "learning_rate": 0.000402, | |
| "loss": 1.1621, | |
| "step": 671 | |
| }, | |
| { | |
| "epoch": 1.0752, | |
| "grad_norm": 1.4110441207885742, | |
| "learning_rate": 0.0004008, | |
| "loss": 1.5065, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 1.0768, | |
| "grad_norm": 1.0681021213531494, | |
| "learning_rate": 0.0003996, | |
| "loss": 1.5376, | |
| "step": 673 | |
| }, | |
| { | |
| "epoch": 1.0784, | |
| "grad_norm": 1.355599045753479, | |
| "learning_rate": 0.0003984, | |
| "loss": 1.7659, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "grad_norm": 1.245712161064148, | |
| "learning_rate": 0.0003972, | |
| "loss": 1.6335, | |
| "step": 675 | |
| }, | |
| { | |
| "epoch": 1.0816, | |
| "grad_norm": 10.161030769348145, | |
| "learning_rate": 0.000396, | |
| "loss": 2.7562, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 1.0832, | |
| "grad_norm": 5.946995735168457, | |
| "learning_rate": 0.0003948, | |
| "loss": 2.2905, | |
| "step": 677 | |
| }, | |
| { | |
| "epoch": 1.0848, | |
| "grad_norm": 3.535452365875244, | |
| "learning_rate": 0.00039359999999999997, | |
| "loss": 1.8719, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 1.0864, | |
| "grad_norm": 1.3516403436660767, | |
| "learning_rate": 0.0003924, | |
| "loss": 1.3978, | |
| "step": 679 | |
| }, | |
| { | |
| "epoch": 1.088, | |
| "grad_norm": 1.6472234725952148, | |
| "learning_rate": 0.00039119999999999997, | |
| "loss": 1.2273, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.0896, | |
| "grad_norm": 3.0599803924560547, | |
| "learning_rate": 0.00039, | |
| "loss": 1.4096, | |
| "step": 681 | |
| }, | |
| { | |
| "epoch": 1.0912, | |
| "grad_norm": 3.078174352645874, | |
| "learning_rate": 0.00038879999999999996, | |
| "loss": 1.2859, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 1.0928, | |
| "grad_norm": 3.5751090049743652, | |
| "learning_rate": 0.0003876, | |
| "loss": 1.4634, | |
| "step": 683 | |
| }, | |
| { | |
| "epoch": 1.0944, | |
| "grad_norm": 2.058401584625244, | |
| "learning_rate": 0.00038639999999999996, | |
| "loss": 1.6292, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 1.096, | |
| "grad_norm": 1.8631670475006104, | |
| "learning_rate": 0.0003852, | |
| "loss": 1.1733, | |
| "step": 685 | |
| }, | |
| { | |
| "epoch": 1.0976, | |
| "grad_norm": 1.9622036218643188, | |
| "learning_rate": 0.00038399999999999996, | |
| "loss": 1.0265, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 1.0992, | |
| "grad_norm": 1.0497528314590454, | |
| "learning_rate": 0.0003828, | |
| "loss": 0.9263, | |
| "step": 687 | |
| }, | |
| { | |
| "epoch": 1.1008, | |
| "grad_norm": 1.0042531490325928, | |
| "learning_rate": 0.00038159999999999995, | |
| "loss": 1.0379, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 1.1024, | |
| "grad_norm": 0.7379323244094849, | |
| "learning_rate": 0.0003804, | |
| "loss": 0.8909, | |
| "step": 689 | |
| }, | |
| { | |
| "epoch": 1.104, | |
| "grad_norm": 0.9978313446044922, | |
| "learning_rate": 0.00037919999999999995, | |
| "loss": 0.9649, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.1056, | |
| "grad_norm": 1.4265302419662476, | |
| "learning_rate": 0.00037799999999999997, | |
| "loss": 0.8303, | |
| "step": 691 | |
| }, | |
| { | |
| "epoch": 1.1072, | |
| "grad_norm": 1.609402060508728, | |
| "learning_rate": 0.00037679999999999994, | |
| "loss": 1.0412, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 1.1088, | |
| "grad_norm": 1.7801131010055542, | |
| "learning_rate": 0.00037559999999999997, | |
| "loss": 1.2652, | |
| "step": 693 | |
| }, | |
| { | |
| "epoch": 1.1104, | |
| "grad_norm": 0.9285919666290283, | |
| "learning_rate": 0.0003744, | |
| "loss": 1.2096, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 1.112, | |
| "grad_norm": 0.9512993693351746, | |
| "learning_rate": 0.00037319999999999996, | |
| "loss": 1.1913, | |
| "step": 695 | |
| }, | |
| { | |
| "epoch": 1.1136, | |
| "grad_norm": 0.9945847392082214, | |
| "learning_rate": 0.000372, | |
| "loss": 1.1681, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 1.1152, | |
| "grad_norm": 0.8993807435035706, | |
| "learning_rate": 0.00037079999999999996, | |
| "loss": 1.2373, | |
| "step": 697 | |
| }, | |
| { | |
| "epoch": 1.1168, | |
| "grad_norm": 1.0664983987808228, | |
| "learning_rate": 0.0003696, | |
| "loss": 1.0773, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 1.1184, | |
| "grad_norm": 1.7917791604995728, | |
| "learning_rate": 0.00036839999999999996, | |
| "loss": 1.4875, | |
| "step": 699 | |
| }, | |
| { | |
| "epoch": 1.12, | |
| "grad_norm": 0.8973643183708191, | |
| "learning_rate": 0.0003672, | |
| "loss": 1.1206, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.1216, | |
| "grad_norm": 1.2886801958084106, | |
| "learning_rate": 0.00036599999999999995, | |
| "loss": 0.9865, | |
| "step": 701 | |
| }, | |
| { | |
| "epoch": 1.1232, | |
| "grad_norm": 1.4595153331756592, | |
| "learning_rate": 0.0003648, | |
| "loss": 1.388, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 1.1248, | |
| "grad_norm": 1.1467390060424805, | |
| "learning_rate": 0.00036359999999999995, | |
| "loss": 0.9407, | |
| "step": 703 | |
| }, | |
| { | |
| "epoch": 1.1264, | |
| "grad_norm": 1.0743904113769531, | |
| "learning_rate": 0.00036239999999999997, | |
| "loss": 1.0897, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 1.1280000000000001, | |
| "grad_norm": 0.9008836150169373, | |
| "learning_rate": 0.00036119999999999994, | |
| "loss": 0.9555, | |
| "step": 705 | |
| }, | |
| { | |
| "epoch": 1.1296, | |
| "grad_norm": 1.016830325126648, | |
| "learning_rate": 0.00035999999999999997, | |
| "loss": 1.0143, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 1.1312, | |
| "grad_norm": 1.8725807666778564, | |
| "learning_rate": 0.00035879999999999994, | |
| "loss": 1.4681, | |
| "step": 707 | |
| }, | |
| { | |
| "epoch": 1.1328, | |
| "grad_norm": 1.0106738805770874, | |
| "learning_rate": 0.00035759999999999996, | |
| "loss": 1.0107, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 1.1344, | |
| "grad_norm": 1.0680439472198486, | |
| "learning_rate": 0.00035639999999999994, | |
| "loss": 0.8917, | |
| "step": 709 | |
| }, | |
| { | |
| "epoch": 1.1360000000000001, | |
| "grad_norm": 0.9104785323143005, | |
| "learning_rate": 0.00035519999999999996, | |
| "loss": 1.1333, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.1376, | |
| "grad_norm": 1.0638065338134766, | |
| "learning_rate": 0.00035399999999999993, | |
| "loss": 0.9465, | |
| "step": 711 | |
| }, | |
| { | |
| "epoch": 1.1392, | |
| "grad_norm": 0.7474643588066101, | |
| "learning_rate": 0.00035279999999999996, | |
| "loss": 0.7617, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 1.1408, | |
| "grad_norm": 0.8218055963516235, | |
| "learning_rate": 0.0003515999999999999, | |
| "loss": 1.0069, | |
| "step": 713 | |
| }, | |
| { | |
| "epoch": 1.1424, | |
| "grad_norm": 1.1213569641113281, | |
| "learning_rate": 0.00035039999999999995, | |
| "loss": 1.0228, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 1.144, | |
| "grad_norm": 1.0540097951889038, | |
| "learning_rate": 0.0003491999999999999, | |
| "loss": 1.2662, | |
| "step": 715 | |
| }, | |
| { | |
| "epoch": 1.1456, | |
| "grad_norm": 1.019623875617981, | |
| "learning_rate": 0.00034799999999999995, | |
| "loss": 1.2725, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 1.1472, | |
| "grad_norm": 0.9017633199691772, | |
| "learning_rate": 0.0003467999999999999, | |
| "loss": 0.8269, | |
| "step": 717 | |
| }, | |
| { | |
| "epoch": 1.1488, | |
| "grad_norm": 3.7017982006073, | |
| "learning_rate": 0.00034559999999999994, | |
| "loss": 2.0837, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 1.1504, | |
| "grad_norm": 1.0694856643676758, | |
| "learning_rate": 0.00034439999999999997, | |
| "loss": 1.0984, | |
| "step": 719 | |
| }, | |
| { | |
| "epoch": 1.152, | |
| "grad_norm": 1.0227575302124023, | |
| "learning_rate": 0.00034319999999999994, | |
| "loss": 1.1509, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.1536, | |
| "grad_norm": 0.8948163390159607, | |
| "learning_rate": 0.00034199999999999996, | |
| "loss": 0.8165, | |
| "step": 721 | |
| }, | |
| { | |
| "epoch": 1.1552, | |
| "grad_norm": 1.575053334236145, | |
| "learning_rate": 0.00034079999999999994, | |
| "loss": 1.4505, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 1.1568, | |
| "grad_norm": 1.6160234212875366, | |
| "learning_rate": 0.00033959999999999996, | |
| "loss": 1.5606, | |
| "step": 723 | |
| }, | |
| { | |
| "epoch": 1.1584, | |
| "grad_norm": 1.469820499420166, | |
| "learning_rate": 0.00033839999999999993, | |
| "loss": 1.3345, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 1.16, | |
| "grad_norm": 2.6582064628601074, | |
| "learning_rate": 0.0003372, | |
| "loss": 1.9885, | |
| "step": 725 | |
| }, | |
| { | |
| "epoch": 1.1616, | |
| "grad_norm": 8.827315330505371, | |
| "learning_rate": 0.000336, | |
| "loss": 2.7075, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 1.1632, | |
| "grad_norm": 5.201417922973633, | |
| "learning_rate": 0.0003348, | |
| "loss": 2.0887, | |
| "step": 727 | |
| }, | |
| { | |
| "epoch": 1.1648, | |
| "grad_norm": 2.5593836307525635, | |
| "learning_rate": 0.0003336, | |
| "loss": 1.3964, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 1.1663999999999999, | |
| "grad_norm": 2.476527452468872, | |
| "learning_rate": 0.0003324, | |
| "loss": 1.6703, | |
| "step": 729 | |
| }, | |
| { | |
| "epoch": 1.168, | |
| "grad_norm": 1.3854165077209473, | |
| "learning_rate": 0.0003312, | |
| "loss": 1.4116, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 1.1696, | |
| "grad_norm": 1.7695822715759277, | |
| "learning_rate": 0.00033, | |
| "loss": 1.7663, | |
| "step": 731 | |
| }, | |
| { | |
| "epoch": 1.1712, | |
| "grad_norm": 1.7809518575668335, | |
| "learning_rate": 0.0003288, | |
| "loss": 2.2482, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 1.1728, | |
| "grad_norm": 1.5759507417678833, | |
| "learning_rate": 0.0003276, | |
| "loss": 1.0389, | |
| "step": 733 | |
| }, | |
| { | |
| "epoch": 1.1743999999999999, | |
| "grad_norm": 3.968517780303955, | |
| "learning_rate": 0.0003264, | |
| "loss": 1.1723, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 1.176, | |
| "grad_norm": 3.551710367202759, | |
| "learning_rate": 0.0003252, | |
| "loss": 1.1122, | |
| "step": 735 | |
| }, | |
| { | |
| "epoch": 1.1776, | |
| "grad_norm": 3.917438507080078, | |
| "learning_rate": 0.000324, | |
| "loss": 1.3001, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 1.1792, | |
| "grad_norm": 2.960092306137085, | |
| "learning_rate": 0.0003228, | |
| "loss": 1.3047, | |
| "step": 737 | |
| }, | |
| { | |
| "epoch": 1.1808, | |
| "grad_norm": 2.588700532913208, | |
| "learning_rate": 0.0003216, | |
| "loss": 1.0372, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 1.1824, | |
| "grad_norm": 2.2377519607543945, | |
| "learning_rate": 0.0003204, | |
| "loss": 1.2321, | |
| "step": 739 | |
| }, | |
| { | |
| "epoch": 1.184, | |
| "grad_norm": 1.3439960479736328, | |
| "learning_rate": 0.0003192, | |
| "loss": 1.2945, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 1.1856, | |
| "grad_norm": 0.8878504037857056, | |
| "learning_rate": 0.000318, | |
| "loss": 1.085, | |
| "step": 741 | |
| }, | |
| { | |
| "epoch": 1.1872, | |
| "grad_norm": 0.8282541632652283, | |
| "learning_rate": 0.0003168, | |
| "loss": 1.1087, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 1.1888, | |
| "grad_norm": 0.8017875552177429, | |
| "learning_rate": 0.0003156, | |
| "loss": 0.9878, | |
| "step": 743 | |
| }, | |
| { | |
| "epoch": 1.1904, | |
| "grad_norm": 1.347516655921936, | |
| "learning_rate": 0.0003144, | |
| "loss": 1.0613, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 1.192, | |
| "grad_norm": 1.9655871391296387, | |
| "learning_rate": 0.00031319999999999997, | |
| "loss": 1.0696, | |
| "step": 745 | |
| }, | |
| { | |
| "epoch": 1.1936, | |
| "grad_norm": 1.588913083076477, | |
| "learning_rate": 0.000312, | |
| "loss": 1.2005, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 1.1952, | |
| "grad_norm": 1.29644775390625, | |
| "learning_rate": 0.00031079999999999997, | |
| "loss": 0.904, | |
| "step": 747 | |
| }, | |
| { | |
| "epoch": 1.1968, | |
| "grad_norm": 2.8275885581970215, | |
| "learning_rate": 0.0003096, | |
| "loss": 1.5441, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 1.1984, | |
| "grad_norm": 1.0542739629745483, | |
| "learning_rate": 0.00030839999999999996, | |
| "loss": 0.9999, | |
| "step": 749 | |
| }, | |
| { | |
| "epoch": 1.2, | |
| "grad_norm": 0.7280116677284241, | |
| "learning_rate": 0.0003072, | |
| "loss": 0.8393, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.2016, | |
| "grad_norm": 0.7934659123420715, | |
| "learning_rate": 0.00030599999999999996, | |
| "loss": 1.1939, | |
| "step": 751 | |
| }, | |
| { | |
| "epoch": 1.2032, | |
| "grad_norm": 0.8406733274459839, | |
| "learning_rate": 0.0003048, | |
| "loss": 0.9562, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 1.2048, | |
| "grad_norm": 1.0488728284835815, | |
| "learning_rate": 0.00030359999999999995, | |
| "loss": 1.2521, | |
| "step": 753 | |
| }, | |
| { | |
| "epoch": 1.2064, | |
| "grad_norm": 0.8879828453063965, | |
| "learning_rate": 0.0003024, | |
| "loss": 0.7971, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 1.208, | |
| "grad_norm": 1.3260725736618042, | |
| "learning_rate": 0.00030119999999999995, | |
| "loss": 1.2188, | |
| "step": 755 | |
| }, | |
| { | |
| "epoch": 1.2096, | |
| "grad_norm": 1.1654318571090698, | |
| "learning_rate": 0.0003, | |
| "loss": 0.9387, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 1.2112, | |
| "grad_norm": 1.351473093032837, | |
| "learning_rate": 0.0002988, | |
| "loss": 1.0464, | |
| "step": 757 | |
| }, | |
| { | |
| "epoch": 1.2128, | |
| "grad_norm": 0.9511071443557739, | |
| "learning_rate": 0.00029759999999999997, | |
| "loss": 0.9436, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 1.2144, | |
| "grad_norm": 1.179603934288025, | |
| "learning_rate": 0.0002964, | |
| "loss": 1.1701, | |
| "step": 759 | |
| }, | |
| { | |
| "epoch": 1.216, | |
| "grad_norm": 0.8080942034721375, | |
| "learning_rate": 0.00029519999999999997, | |
| "loss": 1.3787, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 1.2176, | |
| "grad_norm": 0.7412335872650146, | |
| "learning_rate": 0.000294, | |
| "loss": 1.2793, | |
| "step": 761 | |
| }, | |
| { | |
| "epoch": 1.2192, | |
| "grad_norm": 0.9035298824310303, | |
| "learning_rate": 0.00029279999999999996, | |
| "loss": 1.117, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 1.2208, | |
| "grad_norm": 1.026508092880249, | |
| "learning_rate": 0.0002916, | |
| "loss": 1.0756, | |
| "step": 763 | |
| }, | |
| { | |
| "epoch": 1.2224, | |
| "grad_norm": 1.2814981937408447, | |
| "learning_rate": 0.00029039999999999996, | |
| "loss": 1.0611, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 1.224, | |
| "grad_norm": 1.475760579109192, | |
| "learning_rate": 0.0002892, | |
| "loss": 1.1253, | |
| "step": 765 | |
| }, | |
| { | |
| "epoch": 1.2256, | |
| "grad_norm": 1.2571303844451904, | |
| "learning_rate": 0.00028799999999999995, | |
| "loss": 1.585, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 1.2272, | |
| "grad_norm": 1.2124806642532349, | |
| "learning_rate": 0.0002868, | |
| "loss": 1.3838, | |
| "step": 767 | |
| }, | |
| { | |
| "epoch": 1.2288000000000001, | |
| "grad_norm": 2.581066131591797, | |
| "learning_rate": 0.00028559999999999995, | |
| "loss": 1.7297, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 1.2304, | |
| "grad_norm": 1.0715489387512207, | |
| "learning_rate": 0.0002844, | |
| "loss": 1.0821, | |
| "step": 769 | |
| }, | |
| { | |
| "epoch": 1.232, | |
| "grad_norm": 1.242422342300415, | |
| "learning_rate": 0.00028319999999999994, | |
| "loss": 1.5755, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 1.2336, | |
| "grad_norm": 1.0124776363372803, | |
| "learning_rate": 0.00028199999999999997, | |
| "loss": 1.4446, | |
| "step": 771 | |
| }, | |
| { | |
| "epoch": 1.2352, | |
| "grad_norm": 1.3531243801116943, | |
| "learning_rate": 0.0002808, | |
| "loss": 1.1064, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 1.2368000000000001, | |
| "grad_norm": 2.615983724594116, | |
| "learning_rate": 0.00027959999999999997, | |
| "loss": 1.9305, | |
| "step": 773 | |
| }, | |
| { | |
| "epoch": 1.2384, | |
| "grad_norm": 1.1576447486877441, | |
| "learning_rate": 0.0002784, | |
| "loss": 1.2717, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 1.24, | |
| "grad_norm": 1.79608154296875, | |
| "learning_rate": 0.0002772, | |
| "loss": 1.9539, | |
| "step": 775 | |
| }, | |
| { | |
| "epoch": 1.2416, | |
| "grad_norm": 14.315869331359863, | |
| "learning_rate": 0.000276, | |
| "loss": 3.4927, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 1.2432, | |
| "grad_norm": 4.236233234405518, | |
| "learning_rate": 0.0002748, | |
| "loss": 1.8021, | |
| "step": 777 | |
| }, | |
| { | |
| "epoch": 1.2448, | |
| "grad_norm": 1.4815818071365356, | |
| "learning_rate": 0.0002736, | |
| "loss": 1.5814, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 1.2464, | |
| "grad_norm": 3.584576368331909, | |
| "learning_rate": 0.0002724, | |
| "loss": 2.2016, | |
| "step": 779 | |
| }, | |
| { | |
| "epoch": 1.248, | |
| "grad_norm": 5.444706916809082, | |
| "learning_rate": 0.0002712, | |
| "loss": 2.1992, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 1.2496, | |
| "grad_norm": 1.9935747385025024, | |
| "learning_rate": 0.00027, | |
| "loss": 1.603, | |
| "step": 781 | |
| }, | |
| { | |
| "epoch": 1.2511999999999999, | |
| "grad_norm": 1.7511041164398193, | |
| "learning_rate": 0.0002688, | |
| "loss": 1.2715, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 1.2528000000000001, | |
| "grad_norm": 3.652242422103882, | |
| "learning_rate": 0.0002676, | |
| "loss": 1.2038, | |
| "step": 783 | |
| }, | |
| { | |
| "epoch": 1.2544, | |
| "grad_norm": 4.156230449676514, | |
| "learning_rate": 0.00026639999999999997, | |
| "loss": 1.1785, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 1.256, | |
| "grad_norm": 3.3570737838745117, | |
| "learning_rate": 0.0002652, | |
| "loss": 1.2539, | |
| "step": 785 | |
| }, | |
| { | |
| "epoch": 1.2576, | |
| "grad_norm": 2.836935043334961, | |
| "learning_rate": 0.00026399999999999997, | |
| "loss": 1.1734, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 1.2591999999999999, | |
| "grad_norm": 2.9423153400421143, | |
| "learning_rate": 0.0002628, | |
| "loss": 1.2193, | |
| "step": 787 | |
| }, | |
| { | |
| "epoch": 1.2608, | |
| "grad_norm": 2.4489452838897705, | |
| "learning_rate": 0.00026159999999999996, | |
| "loss": 1.291, | |
| "step": 788 | |
| }, | |
| { | |
| "epoch": 1.2624, | |
| "grad_norm": 1.2637799978256226, | |
| "learning_rate": 0.0002604, | |
| "loss": 0.8955, | |
| "step": 789 | |
| }, | |
| { | |
| "epoch": 1.264, | |
| "grad_norm": 0.8530015349388123, | |
| "learning_rate": 0.00025919999999999996, | |
| "loss": 1.0469, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 1.2656, | |
| "grad_norm": 0.9832066297531128, | |
| "learning_rate": 0.000258, | |
| "loss": 0.8632, | |
| "step": 791 | |
| }, | |
| { | |
| "epoch": 1.2671999999999999, | |
| "grad_norm": 0.5436220765113831, | |
| "learning_rate": 0.00025679999999999995, | |
| "loss": 0.9851, | |
| "step": 792 | |
| }, | |
| { | |
| "epoch": 1.2688, | |
| "grad_norm": 1.093661904335022, | |
| "learning_rate": 0.0002556, | |
| "loss": 1.4339, | |
| "step": 793 | |
| }, | |
| { | |
| "epoch": 1.2704, | |
| "grad_norm": 0.7037041187286377, | |
| "learning_rate": 0.00025439999999999995, | |
| "loss": 1.1739, | |
| "step": 794 | |
| }, | |
| { | |
| "epoch": 1.272, | |
| "grad_norm": 0.6901881098747253, | |
| "learning_rate": 0.0002532, | |
| "loss": 0.9546, | |
| "step": 795 | |
| }, | |
| { | |
| "epoch": 1.2736, | |
| "grad_norm": 1.5446933507919312, | |
| "learning_rate": 0.00025199999999999995, | |
| "loss": 1.336, | |
| "step": 796 | |
| }, | |
| { | |
| "epoch": 1.2752, | |
| "grad_norm": 1.7216750383377075, | |
| "learning_rate": 0.00025079999999999997, | |
| "loss": 0.8747, | |
| "step": 797 | |
| }, | |
| { | |
| "epoch": 1.2768, | |
| "grad_norm": 1.2335959672927856, | |
| "learning_rate": 0.00024959999999999994, | |
| "loss": 0.969, | |
| "step": 798 | |
| }, | |
| { | |
| "epoch": 1.2784, | |
| "grad_norm": 1.483788251876831, | |
| "learning_rate": 0.00024839999999999997, | |
| "loss": 0.8357, | |
| "step": 799 | |
| }, | |
| { | |
| "epoch": 1.28, | |
| "grad_norm": 2.874842882156372, | |
| "learning_rate": 0.0002472, | |
| "loss": 1.2983, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.2816, | |
| "grad_norm": 0.7263085246086121, | |
| "learning_rate": 0.00024599999999999996, | |
| "loss": 1.2608, | |
| "step": 801 | |
| }, | |
| { | |
| "epoch": 1.2832, | |
| "grad_norm": 1.0308623313903809, | |
| "learning_rate": 0.0002448, | |
| "loss": 1.117, | |
| "step": 802 | |
| }, | |
| { | |
| "epoch": 1.2848, | |
| "grad_norm": 0.9009158611297607, | |
| "learning_rate": 0.00024359999999999999, | |
| "loss": 1.0626, | |
| "step": 803 | |
| }, | |
| { | |
| "epoch": 1.2864, | |
| "grad_norm": 1.3145204782485962, | |
| "learning_rate": 0.00024239999999999998, | |
| "loss": 0.9173, | |
| "step": 804 | |
| }, | |
| { | |
| "epoch": 1.288, | |
| "grad_norm": 1.034488320350647, | |
| "learning_rate": 0.00024119999999999998, | |
| "loss": 1.1901, | |
| "step": 805 | |
| }, | |
| { | |
| "epoch": 1.2896, | |
| "grad_norm": 0.8789196610450745, | |
| "learning_rate": 0.00023999999999999998, | |
| "loss": 0.6968, | |
| "step": 806 | |
| }, | |
| { | |
| "epoch": 1.2912, | |
| "grad_norm": 0.9626047015190125, | |
| "learning_rate": 0.0002388, | |
| "loss": 1.0333, | |
| "step": 807 | |
| }, | |
| { | |
| "epoch": 1.2928, | |
| "grad_norm": 0.7256068587303162, | |
| "learning_rate": 0.0002376, | |
| "loss": 0.8452, | |
| "step": 808 | |
| }, | |
| { | |
| "epoch": 1.2944, | |
| "grad_norm": 0.8389018774032593, | |
| "learning_rate": 0.0002364, | |
| "loss": 1.2869, | |
| "step": 809 | |
| }, | |
| { | |
| "epoch": 1.296, | |
| "grad_norm": 0.7672526836395264, | |
| "learning_rate": 0.0002352, | |
| "loss": 1.1781, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.2976, | |
| "grad_norm": 0.9246567487716675, | |
| "learning_rate": 0.000234, | |
| "loss": 1.2095, | |
| "step": 811 | |
| }, | |
| { | |
| "epoch": 1.2992, | |
| "grad_norm": 0.7383604049682617, | |
| "learning_rate": 0.0002328, | |
| "loss": 1.2276, | |
| "step": 812 | |
| }, | |
| { | |
| "epoch": 1.3008, | |
| "grad_norm": 1.0119376182556152, | |
| "learning_rate": 0.0002316, | |
| "loss": 1.0281, | |
| "step": 813 | |
| }, | |
| { | |
| "epoch": 1.3024, | |
| "grad_norm": 1.0825129747390747, | |
| "learning_rate": 0.0002304, | |
| "loss": 1.3149, | |
| "step": 814 | |
| }, | |
| { | |
| "epoch": 1.304, | |
| "grad_norm": 0.8355119824409485, | |
| "learning_rate": 0.0002292, | |
| "loss": 1.2918, | |
| "step": 815 | |
| }, | |
| { | |
| "epoch": 1.3056, | |
| "grad_norm": 0.6260655522346497, | |
| "learning_rate": 0.00022799999999999999, | |
| "loss": 0.936, | |
| "step": 816 | |
| }, | |
| { | |
| "epoch": 1.3072, | |
| "grad_norm": 1.1685816049575806, | |
| "learning_rate": 0.00022679999999999998, | |
| "loss": 1.2178, | |
| "step": 817 | |
| }, | |
| { | |
| "epoch": 1.3088, | |
| "grad_norm": 1.188957691192627, | |
| "learning_rate": 0.00022559999999999998, | |
| "loss": 1.1981, | |
| "step": 818 | |
| }, | |
| { | |
| "epoch": 1.3104, | |
| "grad_norm": 1.8030787706375122, | |
| "learning_rate": 0.00022439999999999998, | |
| "loss": 1.247, | |
| "step": 819 | |
| }, | |
| { | |
| "epoch": 1.312, | |
| "grad_norm": 1.1914401054382324, | |
| "learning_rate": 0.00022319999999999998, | |
| "loss": 1.0994, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.3136, | |
| "grad_norm": 1.3656498193740845, | |
| "learning_rate": 0.00022199999999999998, | |
| "loss": 1.3633, | |
| "step": 821 | |
| }, | |
| { | |
| "epoch": 1.3152, | |
| "grad_norm": 1.4209223985671997, | |
| "learning_rate": 0.00022079999999999997, | |
| "loss": 1.2058, | |
| "step": 822 | |
| }, | |
| { | |
| "epoch": 1.3168, | |
| "grad_norm": 1.3564426898956299, | |
| "learning_rate": 0.00021959999999999997, | |
| "loss": 1.6574, | |
| "step": 823 | |
| }, | |
| { | |
| "epoch": 1.3184, | |
| "grad_norm": 2.096992015838623, | |
| "learning_rate": 0.00021839999999999997, | |
| "loss": 1.9215, | |
| "step": 824 | |
| }, | |
| { | |
| "epoch": 1.32, | |
| "grad_norm": 1.984554409980774, | |
| "learning_rate": 0.00021719999999999997, | |
| "loss": 1.8319, | |
| "step": 825 | |
| }, | |
| { | |
| "epoch": 1.3216, | |
| "grad_norm": 24.8841495513916, | |
| "learning_rate": 0.00021599999999999996, | |
| "loss": 4.94, | |
| "step": 826 | |
| }, | |
| { | |
| "epoch": 1.3232, | |
| "grad_norm": 3.756709575653076, | |
| "learning_rate": 0.00021479999999999996, | |
| "loss": 1.9233, | |
| "step": 827 | |
| }, | |
| { | |
| "epoch": 1.3248, | |
| "grad_norm": 1.6268775463104248, | |
| "learning_rate": 0.00021359999999999996, | |
| "loss": 1.2712, | |
| "step": 828 | |
| }, | |
| { | |
| "epoch": 1.3264, | |
| "grad_norm": 3.466218948364258, | |
| "learning_rate": 0.00021239999999999996, | |
| "loss": 1.7201, | |
| "step": 829 | |
| }, | |
| { | |
| "epoch": 1.328, | |
| "grad_norm": 1.621490716934204, | |
| "learning_rate": 0.00021119999999999996, | |
| "loss": 1.3709, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.3296000000000001, | |
| "grad_norm": 1.1984294652938843, | |
| "learning_rate": 0.00020999999999999998, | |
| "loss": 1.4106, | |
| "step": 831 | |
| }, | |
| { | |
| "epoch": 1.3312, | |
| "grad_norm": 2.042137384414673, | |
| "learning_rate": 0.00020879999999999998, | |
| "loss": 1.1315, | |
| "step": 832 | |
| }, | |
| { | |
| "epoch": 1.3328, | |
| "grad_norm": 1.9946519136428833, | |
| "learning_rate": 0.00020759999999999998, | |
| "loss": 1.0351, | |
| "step": 833 | |
| }, | |
| { | |
| "epoch": 1.3344, | |
| "grad_norm": 2.1006603240966797, | |
| "learning_rate": 0.00020639999999999998, | |
| "loss": 1.2888, | |
| "step": 834 | |
| }, | |
| { | |
| "epoch": 1.336, | |
| "grad_norm": 1.6302366256713867, | |
| "learning_rate": 0.0002052, | |
| "loss": 1.224, | |
| "step": 835 | |
| }, | |
| { | |
| "epoch": 1.3376000000000001, | |
| "grad_norm": 2.526308059692383, | |
| "learning_rate": 0.000204, | |
| "loss": 1.0775, | |
| "step": 836 | |
| }, | |
| { | |
| "epoch": 1.3392, | |
| "grad_norm": 2.1584436893463135, | |
| "learning_rate": 0.0002028, | |
| "loss": 1.2341, | |
| "step": 837 | |
| }, | |
| { | |
| "epoch": 1.3408, | |
| "grad_norm": 1.6390632390975952, | |
| "learning_rate": 0.0002016, | |
| "loss": 1.0818, | |
| "step": 838 | |
| }, | |
| { | |
| "epoch": 1.3424, | |
| "grad_norm": 1.6563626527786255, | |
| "learning_rate": 0.0002004, | |
| "loss": 0.9795, | |
| "step": 839 | |
| }, | |
| { | |
| "epoch": 1.3439999999999999, | |
| "grad_norm": 0.9004166126251221, | |
| "learning_rate": 0.0001992, | |
| "loss": 1.0898, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.3456000000000001, | |
| "grad_norm": 0.8950443863868713, | |
| "learning_rate": 0.000198, | |
| "loss": 0.9329, | |
| "step": 841 | |
| }, | |
| { | |
| "epoch": 1.3472, | |
| "grad_norm": 0.8782767057418823, | |
| "learning_rate": 0.00019679999999999999, | |
| "loss": 0.9925, | |
| "step": 842 | |
| }, | |
| { | |
| "epoch": 1.3488, | |
| "grad_norm": 0.851058304309845, | |
| "learning_rate": 0.00019559999999999998, | |
| "loss": 1.0908, | |
| "step": 843 | |
| }, | |
| { | |
| "epoch": 1.3504, | |
| "grad_norm": 5.08306360244751, | |
| "learning_rate": 0.00019439999999999998, | |
| "loss": 1.6817, | |
| "step": 844 | |
| }, | |
| { | |
| "epoch": 1.3519999999999999, | |
| "grad_norm": 1.4701097011566162, | |
| "learning_rate": 0.00019319999999999998, | |
| "loss": 0.9395, | |
| "step": 845 | |
| }, | |
| { | |
| "epoch": 1.3536000000000001, | |
| "grad_norm": 0.7278363704681396, | |
| "learning_rate": 0.00019199999999999998, | |
| "loss": 1.0193, | |
| "step": 846 | |
| }, | |
| { | |
| "epoch": 1.3552, | |
| "grad_norm": 0.9397822618484497, | |
| "learning_rate": 0.00019079999999999998, | |
| "loss": 0.7025, | |
| "step": 847 | |
| }, | |
| { | |
| "epoch": 1.3568, | |
| "grad_norm": 1.3745059967041016, | |
| "learning_rate": 0.00018959999999999997, | |
| "loss": 0.9564, | |
| "step": 848 | |
| }, | |
| { | |
| "epoch": 1.3584, | |
| "grad_norm": 1.193011999130249, | |
| "learning_rate": 0.00018839999999999997, | |
| "loss": 1.1551, | |
| "step": 849 | |
| }, | |
| { | |
| "epoch": 1.3599999999999999, | |
| "grad_norm": 0.889456570148468, | |
| "learning_rate": 0.0001872, | |
| "loss": 0.9999, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.3616, | |
| "grad_norm": 1.0669219493865967, | |
| "learning_rate": 0.000186, | |
| "loss": 0.8591, | |
| "step": 851 | |
| }, | |
| { | |
| "epoch": 1.3632, | |
| "grad_norm": 0.9589920043945312, | |
| "learning_rate": 0.0001848, | |
| "loss": 0.9345, | |
| "step": 852 | |
| }, | |
| { | |
| "epoch": 1.3648, | |
| "grad_norm": 0.7081511616706848, | |
| "learning_rate": 0.0001836, | |
| "loss": 0.7226, | |
| "step": 853 | |
| }, | |
| { | |
| "epoch": 1.3664, | |
| "grad_norm": 0.7487833499908447, | |
| "learning_rate": 0.0001824, | |
| "loss": 1.0986, | |
| "step": 854 | |
| }, | |
| { | |
| "epoch": 1.3679999999999999, | |
| "grad_norm": 0.9651376605033875, | |
| "learning_rate": 0.00018119999999999999, | |
| "loss": 0.8951, | |
| "step": 855 | |
| }, | |
| { | |
| "epoch": 1.3696, | |
| "grad_norm": 0.9545527100563049, | |
| "learning_rate": 0.00017999999999999998, | |
| "loss": 1.0129, | |
| "step": 856 | |
| }, | |
| { | |
| "epoch": 1.3712, | |
| "grad_norm": 1.016617774963379, | |
| "learning_rate": 0.00017879999999999998, | |
| "loss": 1.0022, | |
| "step": 857 | |
| }, | |
| { | |
| "epoch": 1.3728, | |
| "grad_norm": 1.4556984901428223, | |
| "learning_rate": 0.00017759999999999998, | |
| "loss": 1.317, | |
| "step": 858 | |
| }, | |
| { | |
| "epoch": 1.3744, | |
| "grad_norm": 0.790810763835907, | |
| "learning_rate": 0.00017639999999999998, | |
| "loss": 1.2533, | |
| "step": 859 | |
| }, | |
| { | |
| "epoch": 1.376, | |
| "grad_norm": 0.9381358027458191, | |
| "learning_rate": 0.00017519999999999998, | |
| "loss": 0.9896, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.3776, | |
| "grad_norm": 0.8054640889167786, | |
| "learning_rate": 0.00017399999999999997, | |
| "loss": 0.9845, | |
| "step": 861 | |
| }, | |
| { | |
| "epoch": 1.3792, | |
| "grad_norm": 0.8973929286003113, | |
| "learning_rate": 0.00017279999999999997, | |
| "loss": 1.4839, | |
| "step": 862 | |
| }, | |
| { | |
| "epoch": 1.3808, | |
| "grad_norm": 1.5383504629135132, | |
| "learning_rate": 0.00017159999999999997, | |
| "loss": 1.0332, | |
| "step": 863 | |
| }, | |
| { | |
| "epoch": 1.3824, | |
| "grad_norm": 0.842438817024231, | |
| "learning_rate": 0.00017039999999999997, | |
| "loss": 1.3733, | |
| "step": 864 | |
| }, | |
| { | |
| "epoch": 1.384, | |
| "grad_norm": 1.3562208414077759, | |
| "learning_rate": 0.00016919999999999997, | |
| "loss": 1.3423, | |
| "step": 865 | |
| }, | |
| { | |
| "epoch": 1.3856, | |
| "grad_norm": 0.8287788033485413, | |
| "learning_rate": 0.000168, | |
| "loss": 1.0022, | |
| "step": 866 | |
| }, | |
| { | |
| "epoch": 1.3872, | |
| "grad_norm": 1.4219720363616943, | |
| "learning_rate": 0.0001668, | |
| "loss": 1.2173, | |
| "step": 867 | |
| }, | |
| { | |
| "epoch": 1.3888, | |
| "grad_norm": 1.0144832134246826, | |
| "learning_rate": 0.0001656, | |
| "loss": 1.3067, | |
| "step": 868 | |
| }, | |
| { | |
| "epoch": 1.3904, | |
| "grad_norm": 1.1086362600326538, | |
| "learning_rate": 0.0001644, | |
| "loss": 1.4451, | |
| "step": 869 | |
| }, | |
| { | |
| "epoch": 1.392, | |
| "grad_norm": 1.1874333620071411, | |
| "learning_rate": 0.0001632, | |
| "loss": 1.4588, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.3936, | |
| "grad_norm": 1.2281416654586792, | |
| "learning_rate": 0.000162, | |
| "loss": 1.564, | |
| "step": 871 | |
| }, | |
| { | |
| "epoch": 1.3952, | |
| "grad_norm": 1.024274468421936, | |
| "learning_rate": 0.0001608, | |
| "loss": 1.3052, | |
| "step": 872 | |
| }, | |
| { | |
| "epoch": 1.3968, | |
| "grad_norm": 1.0952630043029785, | |
| "learning_rate": 0.0001596, | |
| "loss": 1.4589, | |
| "step": 873 | |
| }, | |
| { | |
| "epoch": 1.3984, | |
| "grad_norm": 1.3578640222549438, | |
| "learning_rate": 0.0001584, | |
| "loss": 1.8693, | |
| "step": 874 | |
| }, | |
| { | |
| "epoch": 1.4, | |
| "grad_norm": 1.3908575773239136, | |
| "learning_rate": 0.0001572, | |
| "loss": 2.0153, | |
| "step": 875 | |
| }, | |
| { | |
| "epoch": 1.4016, | |
| "grad_norm": 3.5373663902282715, | |
| "learning_rate": 0.000156, | |
| "loss": 1.6021, | |
| "step": 876 | |
| }, | |
| { | |
| "epoch": 1.4032, | |
| "grad_norm": 4.33758020401001, | |
| "learning_rate": 0.0001548, | |
| "loss": 1.6701, | |
| "step": 877 | |
| }, | |
| { | |
| "epoch": 1.4048, | |
| "grad_norm": 0.7620241641998291, | |
| "learning_rate": 0.0001536, | |
| "loss": 1.1137, | |
| "step": 878 | |
| }, | |
| { | |
| "epoch": 1.4064, | |
| "grad_norm": 3.931720733642578, | |
| "learning_rate": 0.0001524, | |
| "loss": 1.7832, | |
| "step": 879 | |
| }, | |
| { | |
| "epoch": 1.408, | |
| "grad_norm": 4.786645412445068, | |
| "learning_rate": 0.0001512, | |
| "loss": 1.9583, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 1.4096, | |
| "grad_norm": 1.036521553993225, | |
| "learning_rate": 0.00015, | |
| "loss": 1.0819, | |
| "step": 881 | |
| }, | |
| { | |
| "epoch": 1.4112, | |
| "grad_norm": 4.162290096282959, | |
| "learning_rate": 0.00014879999999999998, | |
| "loss": 2.0566, | |
| "step": 882 | |
| }, | |
| { | |
| "epoch": 1.4128, | |
| "grad_norm": 2.359104633331299, | |
| "learning_rate": 0.00014759999999999998, | |
| "loss": 1.6004, | |
| "step": 883 | |
| }, | |
| { | |
| "epoch": 1.4144, | |
| "grad_norm": 1.0020617246627808, | |
| "learning_rate": 0.00014639999999999998, | |
| "loss": 1.3709, | |
| "step": 884 | |
| }, | |
| { | |
| "epoch": 1.416, | |
| "grad_norm": 1.8424478769302368, | |
| "learning_rate": 0.00014519999999999998, | |
| "loss": 1.2331, | |
| "step": 885 | |
| }, | |
| { | |
| "epoch": 1.4176, | |
| "grad_norm": 1.1963813304901123, | |
| "learning_rate": 0.00014399999999999998, | |
| "loss": 1.0713, | |
| "step": 886 | |
| }, | |
| { | |
| "epoch": 1.4192, | |
| "grad_norm": 1.5633505582809448, | |
| "learning_rate": 0.00014279999999999997, | |
| "loss": 1.0691, | |
| "step": 887 | |
| }, | |
| { | |
| "epoch": 1.4208, | |
| "grad_norm": 1.1638294458389282, | |
| "learning_rate": 0.00014159999999999997, | |
| "loss": 0.9451, | |
| "step": 888 | |
| }, | |
| { | |
| "epoch": 1.4224, | |
| "grad_norm": 1.7012614011764526, | |
| "learning_rate": 0.0001404, | |
| "loss": 0.8284, | |
| "step": 889 | |
| }, | |
| { | |
| "epoch": 1.424, | |
| "grad_norm": 1.2200738191604614, | |
| "learning_rate": 0.0001392, | |
| "loss": 0.8185, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 1.4256, | |
| "grad_norm": 1.1766223907470703, | |
| "learning_rate": 0.000138, | |
| "loss": 0.9226, | |
| "step": 891 | |
| }, | |
| { | |
| "epoch": 1.4272, | |
| "grad_norm": 0.9217773675918579, | |
| "learning_rate": 0.0001368, | |
| "loss": 0.8685, | |
| "step": 892 | |
| }, | |
| { | |
| "epoch": 1.4288, | |
| "grad_norm": 0.8851170539855957, | |
| "learning_rate": 0.0001356, | |
| "loss": 0.7968, | |
| "step": 893 | |
| }, | |
| { | |
| "epoch": 1.4304000000000001, | |
| "grad_norm": 0.8986585140228271, | |
| "learning_rate": 0.0001344, | |
| "loss": 0.9926, | |
| "step": 894 | |
| }, | |
| { | |
| "epoch": 1.432, | |
| "grad_norm": 0.760133683681488, | |
| "learning_rate": 0.00013319999999999999, | |
| "loss": 0.761, | |
| "step": 895 | |
| }, | |
| { | |
| "epoch": 1.4336, | |
| "grad_norm": 1.3854573965072632, | |
| "learning_rate": 0.00013199999999999998, | |
| "loss": 1.3453, | |
| "step": 896 | |
| }, | |
| { | |
| "epoch": 1.4352, | |
| "grad_norm": 2.538731098175049, | |
| "learning_rate": 0.00013079999999999998, | |
| "loss": 1.405, | |
| "step": 897 | |
| }, | |
| { | |
| "epoch": 1.4368, | |
| "grad_norm": 0.9111962914466858, | |
| "learning_rate": 0.00012959999999999998, | |
| "loss": 1.0013, | |
| "step": 898 | |
| }, | |
| { | |
| "epoch": 1.4384000000000001, | |
| "grad_norm": 0.9045060873031616, | |
| "learning_rate": 0.00012839999999999998, | |
| "loss": 1.0312, | |
| "step": 899 | |
| }, | |
| { | |
| "epoch": 1.44, | |
| "grad_norm": 0.8330265879631042, | |
| "learning_rate": 0.00012719999999999997, | |
| "loss": 1.1871, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.4416, | |
| "grad_norm": 0.8071021437644958, | |
| "learning_rate": 0.00012599999999999997, | |
| "loss": 1.4302, | |
| "step": 901 | |
| }, | |
| { | |
| "epoch": 1.4432, | |
| "grad_norm": 1.5638083219528198, | |
| "learning_rate": 0.00012479999999999997, | |
| "loss": 1.2347, | |
| "step": 902 | |
| }, | |
| { | |
| "epoch": 1.4447999999999999, | |
| "grad_norm": 1.674561619758606, | |
| "learning_rate": 0.0001236, | |
| "loss": 1.197, | |
| "step": 903 | |
| }, | |
| { | |
| "epoch": 1.4464000000000001, | |
| "grad_norm": 1.1858874559402466, | |
| "learning_rate": 0.0001224, | |
| "loss": 0.8886, | |
| "step": 904 | |
| }, | |
| { | |
| "epoch": 1.448, | |
| "grad_norm": 0.8216437101364136, | |
| "learning_rate": 0.00012119999999999999, | |
| "loss": 0.8166, | |
| "step": 905 | |
| }, | |
| { | |
| "epoch": 1.4496, | |
| "grad_norm": 0.7633309364318848, | |
| "learning_rate": 0.00011999999999999999, | |
| "loss": 1.0883, | |
| "step": 906 | |
| }, | |
| { | |
| "epoch": 1.4512, | |
| "grad_norm": 0.7902513742446899, | |
| "learning_rate": 0.0001188, | |
| "loss": 1.1526, | |
| "step": 907 | |
| }, | |
| { | |
| "epoch": 1.4527999999999999, | |
| "grad_norm": 0.6996155381202698, | |
| "learning_rate": 0.0001176, | |
| "loss": 0.8232, | |
| "step": 908 | |
| }, | |
| { | |
| "epoch": 1.4544000000000001, | |
| "grad_norm": 1.0348126888275146, | |
| "learning_rate": 0.0001164, | |
| "loss": 1.5505, | |
| "step": 909 | |
| }, | |
| { | |
| "epoch": 1.456, | |
| "grad_norm": 0.9336599707603455, | |
| "learning_rate": 0.0001152, | |
| "loss": 1.1751, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 1.4576, | |
| "grad_norm": 0.8460116386413574, | |
| "learning_rate": 0.00011399999999999999, | |
| "loss": 1.1737, | |
| "step": 911 | |
| }, | |
| { | |
| "epoch": 1.4592, | |
| "grad_norm": 0.8113738894462585, | |
| "learning_rate": 0.00011279999999999999, | |
| "loss": 1.0484, | |
| "step": 912 | |
| }, | |
| { | |
| "epoch": 1.4607999999999999, | |
| "grad_norm": 0.8300177454948425, | |
| "learning_rate": 0.00011159999999999999, | |
| "loss": 0.7955, | |
| "step": 913 | |
| }, | |
| { | |
| "epoch": 1.4624, | |
| "grad_norm": 1.2062090635299683, | |
| "learning_rate": 0.00011039999999999999, | |
| "loss": 1.3775, | |
| "step": 914 | |
| }, | |
| { | |
| "epoch": 1.464, | |
| "grad_norm": 1.4255359172821045, | |
| "learning_rate": 0.00010919999999999998, | |
| "loss": 1.3335, | |
| "step": 915 | |
| }, | |
| { | |
| "epoch": 1.4656, | |
| "grad_norm": 1.0286868810653687, | |
| "learning_rate": 0.00010799999999999998, | |
| "loss": 1.1146, | |
| "step": 916 | |
| }, | |
| { | |
| "epoch": 1.4672, | |
| "grad_norm": 1.0137372016906738, | |
| "learning_rate": 0.00010679999999999998, | |
| "loss": 1.0296, | |
| "step": 917 | |
| }, | |
| { | |
| "epoch": 1.4687999999999999, | |
| "grad_norm": 0.9231035113334656, | |
| "learning_rate": 0.00010559999999999998, | |
| "loss": 1.0707, | |
| "step": 918 | |
| }, | |
| { | |
| "epoch": 1.4704, | |
| "grad_norm": 0.7874758839607239, | |
| "learning_rate": 0.00010439999999999999, | |
| "loss": 0.8588, | |
| "step": 919 | |
| }, | |
| { | |
| "epoch": 1.472, | |
| "grad_norm": 0.9133099913597107, | |
| "learning_rate": 0.00010319999999999999, | |
| "loss": 1.3255, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 1.4736, | |
| "grad_norm": 1.030543565750122, | |
| "learning_rate": 0.000102, | |
| "loss": 1.5101, | |
| "step": 921 | |
| }, | |
| { | |
| "epoch": 1.4752, | |
| "grad_norm": 0.9055063724517822, | |
| "learning_rate": 0.0001008, | |
| "loss": 0.9935, | |
| "step": 922 | |
| }, | |
| { | |
| "epoch": 1.4768, | |
| "grad_norm": 1.4548567533493042, | |
| "learning_rate": 9.96e-05, | |
| "loss": 1.5182, | |
| "step": 923 | |
| }, | |
| { | |
| "epoch": 1.4784, | |
| "grad_norm": 1.2355753183364868, | |
| "learning_rate": 9.839999999999999e-05, | |
| "loss": 1.5844, | |
| "step": 924 | |
| }, | |
| { | |
| "epoch": 1.48, | |
| "grad_norm": 2.3780314922332764, | |
| "learning_rate": 9.719999999999999e-05, | |
| "loss": 1.9339, | |
| "step": 925 | |
| }, | |
| { | |
| "epoch": 1.4816, | |
| "grad_norm": 1.8571350574493408, | |
| "learning_rate": 9.599999999999999e-05, | |
| "loss": 1.1195, | |
| "step": 926 | |
| }, | |
| { | |
| "epoch": 1.4832, | |
| "grad_norm": 3.069366455078125, | |
| "learning_rate": 9.479999999999999e-05, | |
| "loss": 1.4374, | |
| "step": 927 | |
| }, | |
| { | |
| "epoch": 1.4848, | |
| "grad_norm": 1.4385106563568115, | |
| "learning_rate": 9.36e-05, | |
| "loss": 1.0552, | |
| "step": 928 | |
| }, | |
| { | |
| "epoch": 1.4864, | |
| "grad_norm": 2.345099449157715, | |
| "learning_rate": 9.24e-05, | |
| "loss": 1.7314, | |
| "step": 929 | |
| }, | |
| { | |
| "epoch": 1.488, | |
| "grad_norm": 0.7494111657142639, | |
| "learning_rate": 9.12e-05, | |
| "loss": 0.8883, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 1.4896, | |
| "grad_norm": 0.8890445232391357, | |
| "learning_rate": 8.999999999999999e-05, | |
| "loss": 0.9971, | |
| "step": 931 | |
| }, | |
| { | |
| "epoch": 1.4912, | |
| "grad_norm": 0.8567062616348267, | |
| "learning_rate": 8.879999999999999e-05, | |
| "loss": 0.9844, | |
| "step": 932 | |
| }, | |
| { | |
| "epoch": 1.4928, | |
| "grad_norm": 2.1418657302856445, | |
| "learning_rate": 8.759999999999999e-05, | |
| "loss": 1.2059, | |
| "step": 933 | |
| }, | |
| { | |
| "epoch": 1.4944, | |
| "grad_norm": 0.9829572439193726, | |
| "learning_rate": 8.639999999999999e-05, | |
| "loss": 1.2171, | |
| "step": 934 | |
| }, | |
| { | |
| "epoch": 1.496, | |
| "grad_norm": 0.8156057000160217, | |
| "learning_rate": 8.519999999999998e-05, | |
| "loss": 0.9593, | |
| "step": 935 | |
| }, | |
| { | |
| "epoch": 1.4976, | |
| "grad_norm": 1.4363347291946411, | |
| "learning_rate": 8.4e-05, | |
| "loss": 0.9601, | |
| "step": 936 | |
| }, | |
| { | |
| "epoch": 1.4992, | |
| "grad_norm": 1.0625683069229126, | |
| "learning_rate": 8.28e-05, | |
| "loss": 1.0812, | |
| "step": 937 | |
| }, | |
| { | |
| "epoch": 1.5008, | |
| "grad_norm": 0.9699941873550415, | |
| "learning_rate": 8.16e-05, | |
| "loss": 0.9473, | |
| "step": 938 | |
| }, | |
| { | |
| "epoch": 1.5024, | |
| "grad_norm": 0.772868812084198, | |
| "learning_rate": 8.04e-05, | |
| "loss": 0.7998, | |
| "step": 939 | |
| }, | |
| { | |
| "epoch": 1.504, | |
| "grad_norm": 0.9699268341064453, | |
| "learning_rate": 7.92e-05, | |
| "loss": 0.9162, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 1.5056, | |
| "grad_norm": 0.9714852571487427, | |
| "learning_rate": 7.8e-05, | |
| "loss": 0.7546, | |
| "step": 941 | |
| }, | |
| { | |
| "epoch": 1.5072, | |
| "grad_norm": 0.7131572365760803, | |
| "learning_rate": 7.68e-05, | |
| "loss": 0.9188, | |
| "step": 942 | |
| }, | |
| { | |
| "epoch": 1.5088, | |
| "grad_norm": 0.855786919593811, | |
| "learning_rate": 7.56e-05, | |
| "loss": 1.0208, | |
| "step": 943 | |
| }, | |
| { | |
| "epoch": 1.5104, | |
| "grad_norm": 0.8761354684829712, | |
| "learning_rate": 7.439999999999999e-05, | |
| "loss": 0.9603, | |
| "step": 944 | |
| }, | |
| { | |
| "epoch": 1.512, | |
| "grad_norm": 0.8056983351707458, | |
| "learning_rate": 7.319999999999999e-05, | |
| "loss": 1.0588, | |
| "step": 945 | |
| }, | |
| { | |
| "epoch": 1.5135999999999998, | |
| "grad_norm": 1.0384901762008667, | |
| "learning_rate": 7.199999999999999e-05, | |
| "loss": 0.8915, | |
| "step": 946 | |
| }, | |
| { | |
| "epoch": 1.5152, | |
| "grad_norm": 0.6739591956138611, | |
| "learning_rate": 7.079999999999999e-05, | |
| "loss": 0.7157, | |
| "step": 947 | |
| }, | |
| { | |
| "epoch": 1.5168, | |
| "grad_norm": 0.7567741274833679, | |
| "learning_rate": 6.96e-05, | |
| "loss": 0.733, | |
| "step": 948 | |
| }, | |
| { | |
| "epoch": 1.5184, | |
| "grad_norm": 1.2941476106643677, | |
| "learning_rate": 6.84e-05, | |
| "loss": 1.2629, | |
| "step": 949 | |
| }, | |
| { | |
| "epoch": 1.52, | |
| "grad_norm": 0.7849064469337463, | |
| "learning_rate": 6.72e-05, | |
| "loss": 1.0687, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.5215999999999998, | |
| "grad_norm": 0.7167961001396179, | |
| "learning_rate": 6.599999999999999e-05, | |
| "loss": 0.9125, | |
| "step": 951 | |
| }, | |
| { | |
| "epoch": 1.5232, | |
| "grad_norm": 1.4623581171035767, | |
| "learning_rate": 6.479999999999999e-05, | |
| "loss": 1.0419, | |
| "step": 952 | |
| }, | |
| { | |
| "epoch": 1.5248, | |
| "grad_norm": 1.0260752439498901, | |
| "learning_rate": 6.359999999999999e-05, | |
| "loss": 0.759, | |
| "step": 953 | |
| }, | |
| { | |
| "epoch": 1.5264, | |
| "grad_norm": 0.6854276657104492, | |
| "learning_rate": 6.239999999999999e-05, | |
| "loss": 1.1125, | |
| "step": 954 | |
| }, | |
| { | |
| "epoch": 1.528, | |
| "grad_norm": 0.776077151298523, | |
| "learning_rate": 6.12e-05, | |
| "loss": 0.7962, | |
| "step": 955 | |
| }, | |
| { | |
| "epoch": 1.5295999999999998, | |
| "grad_norm": 0.7898908257484436, | |
| "learning_rate": 5.9999999999999995e-05, | |
| "loss": 1.0421, | |
| "step": 956 | |
| }, | |
| { | |
| "epoch": 1.5312000000000001, | |
| "grad_norm": 0.7632326483726501, | |
| "learning_rate": 5.88e-05, | |
| "loss": 0.8491, | |
| "step": 957 | |
| }, | |
| { | |
| "epoch": 1.5328, | |
| "grad_norm": 0.7451068162918091, | |
| "learning_rate": 5.76e-05, | |
| "loss": 0.9336, | |
| "step": 958 | |
| }, | |
| { | |
| "epoch": 1.5344, | |
| "grad_norm": 0.7700297832489014, | |
| "learning_rate": 5.6399999999999995e-05, | |
| "loss": 0.9, | |
| "step": 959 | |
| }, | |
| { | |
| "epoch": 1.536, | |
| "grad_norm": 0.879389762878418, | |
| "learning_rate": 5.519999999999999e-05, | |
| "loss": 1.1537, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 1.5375999999999999, | |
| "grad_norm": 0.9828069806098938, | |
| "learning_rate": 5.399999999999999e-05, | |
| "loss": 0.9058, | |
| "step": 961 | |
| }, | |
| { | |
| "epoch": 1.5392000000000001, | |
| "grad_norm": 0.9066524505615234, | |
| "learning_rate": 5.279999999999999e-05, | |
| "loss": 1.1331, | |
| "step": 962 | |
| }, | |
| { | |
| "epoch": 1.5408, | |
| "grad_norm": 0.7387616038322449, | |
| "learning_rate": 5.1599999999999994e-05, | |
| "loss": 0.9451, | |
| "step": 963 | |
| }, | |
| { | |
| "epoch": 1.5424, | |
| "grad_norm": 1.3451749086380005, | |
| "learning_rate": 5.04e-05, | |
| "loss": 1.1639, | |
| "step": 964 | |
| }, | |
| { | |
| "epoch": 1.544, | |
| "grad_norm": 1.0739102363586426, | |
| "learning_rate": 4.9199999999999997e-05, | |
| "loss": 1.4125, | |
| "step": 965 | |
| }, | |
| { | |
| "epoch": 1.5455999999999999, | |
| "grad_norm": 0.9140891432762146, | |
| "learning_rate": 4.7999999999999994e-05, | |
| "loss": 1.1883, | |
| "step": 966 | |
| }, | |
| { | |
| "epoch": 1.5472000000000001, | |
| "grad_norm": 1.1968399286270142, | |
| "learning_rate": 4.68e-05, | |
| "loss": 1.1692, | |
| "step": 967 | |
| }, | |
| { | |
| "epoch": 1.5488, | |
| "grad_norm": 0.9506883025169373, | |
| "learning_rate": 4.56e-05, | |
| "loss": 0.9939, | |
| "step": 968 | |
| }, | |
| { | |
| "epoch": 1.5504, | |
| "grad_norm": 1.3204574584960938, | |
| "learning_rate": 4.4399999999999995e-05, | |
| "loss": 0.9103, | |
| "step": 969 | |
| }, | |
| { | |
| "epoch": 1.552, | |
| "grad_norm": 1.3693453073501587, | |
| "learning_rate": 4.319999999999999e-05, | |
| "loss": 1.279, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 1.5535999999999999, | |
| "grad_norm": 1.286435842514038, | |
| "learning_rate": 4.2e-05, | |
| "loss": 1.2637, | |
| "step": 971 | |
| }, | |
| { | |
| "epoch": 1.5552000000000001, | |
| "grad_norm": 1.6549150943756104, | |
| "learning_rate": 4.08e-05, | |
| "loss": 1.5989, | |
| "step": 972 | |
| }, | |
| { | |
| "epoch": 1.5568, | |
| "grad_norm": 1.1446410417556763, | |
| "learning_rate": 3.96e-05, | |
| "loss": 1.5587, | |
| "step": 973 | |
| }, | |
| { | |
| "epoch": 1.5584, | |
| "grad_norm": 1.1329030990600586, | |
| "learning_rate": 3.84e-05, | |
| "loss": 1.5915, | |
| "step": 974 | |
| }, | |
| { | |
| "epoch": 1.56, | |
| "grad_norm": 6.757206916809082, | |
| "learning_rate": 3.7199999999999996e-05, | |
| "loss": 3.078, | |
| "step": 975 | |
| }, | |
| { | |
| "epoch": 1.5615999999999999, | |
| "grad_norm": 3.0250446796417236, | |
| "learning_rate": 3.5999999999999994e-05, | |
| "loss": 1.5766, | |
| "step": 976 | |
| }, | |
| { | |
| "epoch": 1.5632000000000001, | |
| "grad_norm": 1.796055793762207, | |
| "learning_rate": 3.48e-05, | |
| "loss": 1.4036, | |
| "step": 977 | |
| }, | |
| { | |
| "epoch": 1.5648, | |
| "grad_norm": 2.8835206031799316, | |
| "learning_rate": 3.36e-05, | |
| "loss": 1.4255, | |
| "step": 978 | |
| }, | |
| { | |
| "epoch": 1.5664, | |
| "grad_norm": 3.2792022228240967, | |
| "learning_rate": 3.2399999999999995e-05, | |
| "loss": 1.5776, | |
| "step": 979 | |
| }, | |
| { | |
| "epoch": 1.568, | |
| "grad_norm": 0.9129714369773865, | |
| "learning_rate": 3.119999999999999e-05, | |
| "loss": 1.1509, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 1.5695999999999999, | |
| "grad_norm": 1.325767993927002, | |
| "learning_rate": 2.9999999999999997e-05, | |
| "loss": 1.3454, | |
| "step": 981 | |
| }, | |
| { | |
| "epoch": 1.5712000000000002, | |
| "grad_norm": 0.8600112795829773, | |
| "learning_rate": 2.88e-05, | |
| "loss": 0.8939, | |
| "step": 982 | |
| }, | |
| { | |
| "epoch": 1.5728, | |
| "grad_norm": 0.6780167818069458, | |
| "learning_rate": 2.7599999999999997e-05, | |
| "loss": 0.7801, | |
| "step": 983 | |
| }, | |
| { | |
| "epoch": 1.5744, | |
| "grad_norm": 1.1986228227615356, | |
| "learning_rate": 2.6399999999999995e-05, | |
| "loss": 1.0117, | |
| "step": 984 | |
| }, | |
| { | |
| "epoch": 1.576, | |
| "grad_norm": 1.3320322036743164, | |
| "learning_rate": 2.52e-05, | |
| "loss": 1.0323, | |
| "step": 985 | |
| }, | |
| { | |
| "epoch": 1.5776, | |
| "grad_norm": 0.6832807064056396, | |
| "learning_rate": 2.3999999999999997e-05, | |
| "loss": 1.1191, | |
| "step": 986 | |
| }, | |
| { | |
| "epoch": 1.5792000000000002, | |
| "grad_norm": 3.410607099533081, | |
| "learning_rate": 2.28e-05, | |
| "loss": 1.7286, | |
| "step": 987 | |
| }, | |
| { | |
| "epoch": 1.5808, | |
| "grad_norm": 0.7802388668060303, | |
| "learning_rate": 2.1599999999999996e-05, | |
| "loss": 0.7245, | |
| "step": 988 | |
| }, | |
| { | |
| "epoch": 1.5824, | |
| "grad_norm": 0.8889452815055847, | |
| "learning_rate": 2.04e-05, | |
| "loss": 0.8384, | |
| "step": 989 | |
| }, | |
| { | |
| "epoch": 1.584, | |
| "grad_norm": 0.7941523194313049, | |
| "learning_rate": 1.92e-05, | |
| "loss": 1.0049, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 1.5856, | |
| "grad_norm": 0.8905624747276306, | |
| "learning_rate": 1.7999999999999997e-05, | |
| "loss": 0.7337, | |
| "step": 991 | |
| }, | |
| { | |
| "epoch": 1.5872000000000002, | |
| "grad_norm": 1.1984586715698242, | |
| "learning_rate": 1.68e-05, | |
| "loss": 0.8216, | |
| "step": 992 | |
| }, | |
| { | |
| "epoch": 1.5888, | |
| "grad_norm": 0.8467238545417786, | |
| "learning_rate": 1.5599999999999996e-05, | |
| "loss": 0.807, | |
| "step": 993 | |
| }, | |
| { | |
| "epoch": 1.5904, | |
| "grad_norm": 1.230843424797058, | |
| "learning_rate": 1.44e-05, | |
| "loss": 1.1562, | |
| "step": 994 | |
| }, | |
| { | |
| "epoch": 1.592, | |
| "grad_norm": 1.0017879009246826, | |
| "learning_rate": 1.3199999999999997e-05, | |
| "loss": 0.971, | |
| "step": 995 | |
| }, | |
| { | |
| "epoch": 1.5936, | |
| "grad_norm": 1.124283790588379, | |
| "learning_rate": 1.1999999999999999e-05, | |
| "loss": 0.8834, | |
| "step": 996 | |
| }, | |
| { | |
| "epoch": 1.5952, | |
| "grad_norm": 0.9626112580299377, | |
| "learning_rate": 1.0799999999999998e-05, | |
| "loss": 1.0106, | |
| "step": 997 | |
| }, | |
| { | |
| "epoch": 1.5968, | |
| "grad_norm": 1.2808488607406616, | |
| "learning_rate": 9.6e-06, | |
| "loss": 0.9121, | |
| "step": 998 | |
| }, | |
| { | |
| "epoch": 1.5984, | |
| "grad_norm": 0.904681921005249, | |
| "learning_rate": 8.4e-06, | |
| "loss": 0.8199, | |
| "step": 999 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "grad_norm": 0.8806717395782471, | |
| "learning_rate": 7.2e-06, | |
| "loss": 0.972, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "eval_cer": 0.3090127193119299, | |
| "eval_loss": 1.500978946685791, | |
| "eval_runtime": 162.132, | |
| "eval_samples_per_second": 19.342, | |
| "eval_steps_per_second": 1.209, | |
| "eval_wer": 0.4351051665913261, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "step": 1000, | |
| "total_flos": 6.212261523683712e+18, | |
| "train_loss": 3.1903547011613846, | |
| "train_runtime": 2112.9078, | |
| "train_samples_per_second": 15.145, | |
| "train_steps_per_second": 0.473 | |
| } | |
| ], | |
| "logging_steps": 1.0, | |
| "max_steps": 1000, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 2, | |
| "save_steps": 1000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 6.212261523683712e+18, | |
| "train_batch_size": 16, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |