{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.9914529914529915,
  "eval_steps": 500,
  "global_step": 149,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03418803418803419,
      "grad_norm": 83.55925960057581,
      "learning_rate": 0.0,
      "loss": 0.1635,
      "step": 1
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 75.5491523818588,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.205,
      "step": 2
    },
    {
      "epoch": 0.10256410256410256,
      "grad_norm": 69.68045087594498,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.2142,
      "step": 3
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 57.086453365632416,
      "learning_rate": 6e-06,
      "loss": 0.1981,
      "step": 4
    },
    {
      "epoch": 0.17094017094017094,
      "grad_norm": 49.239480244872944,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.3847,
      "step": 5
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 16.322352326647238,
      "learning_rate": 1e-05,
      "loss": 0.1727,
      "step": 6
    },
    {
      "epoch": 0.23931623931623933,
      "grad_norm": 10.054689685370425,
      "learning_rate": 9.99882649009242e-06,
      "loss": 0.1366,
      "step": 7
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 12.472152502380862,
      "learning_rate": 9.995306511219885e-06,
      "loss": 0.2145,
      "step": 8
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 7.146434932900831,
      "learning_rate": 9.989441715674422e-06,
      "loss": 0.175,
      "step": 9
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 10.829219452046477,
      "learning_rate": 9.981234856414306e-06,
      "loss": 0.1697,
      "step": 10
    },
    {
      "epoch": 0.37606837606837606,
      "grad_norm": 3.9257977267573696,
      "learning_rate": 9.970689785771798e-06,
      "loss": 0.1282,
      "step": 11
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 4.881222222368343,
      "learning_rate": 9.957811453644848e-06,
      "loss": 0.1137,
      "step": 12
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 7.191012828431416,
      "learning_rate": 9.942605905173593e-06,
      "loss": 0.0955,
      "step": 13
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 11.172906117826464,
      "learning_rate": 9.925080277902743e-06,
      "loss": 0.1265,
      "step": 14
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 6.787542905973914,
      "learning_rate": 9.905242798431196e-06,
      "loss": 0.1066,
      "step": 15
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 3.7905884982716795,
      "learning_rate": 9.883102778550434e-06,
      "loss": 0.0692,
      "step": 16
    },
    {
      "epoch": 0.5811965811965812,
      "grad_norm": 8.499180844600431,
      "learning_rate": 9.858670610873528e-06,
      "loss": 0.075,
      "step": 17
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 7.628854941819294,
      "learning_rate": 9.831957763956814e-06,
      "loss": 0.0844,
      "step": 18
    },
    {
      "epoch": 0.6495726495726496,
      "grad_norm": 3.4788385946157008,
      "learning_rate": 9.802976776916493e-06,
      "loss": 0.04,
      "step": 19
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 9.925442676615837,
      "learning_rate": 9.771741253542742e-06,
      "loss": 0.1003,
      "step": 20
    },
    {
      "epoch": 0.717948717948718,
      "grad_norm": 8.49069545035768,
      "learning_rate": 9.738265855914014e-06,
      "loss": 0.0564,
      "step": 21
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 9.111442462642389,
      "learning_rate": 9.70256629751462e-06,
      "loss": 0.0324,
      "step": 22
    },
    {
      "epoch": 0.7863247863247863,
      "grad_norm": 7.948522736839896,
      "learning_rate": 9.664659335858755e-06,
      "loss": 0.0668,
      "step": 23
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 16.01231870016539,
      "learning_rate": 9.624562764624445e-06,
      "loss": 0.0726,
      "step": 24
    },
    {
      "epoch": 0.8547008547008547,
      "grad_norm": 13.854838853842292,
      "learning_rate": 9.582295405301131e-06,
      "loss": 0.103,
      "step": 25
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 3.4607583628632517,
      "learning_rate": 9.537877098354787e-06,
      "loss": 0.0585,
      "step": 26
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 12.924003397447635,
      "learning_rate": 9.491328693914723e-06,
      "loss": 0.0402,
      "step": 27
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 4.118368098723785,
      "learning_rate": 9.442672041986456e-06,
      "loss": 0.0662,
      "step": 28
    },
    {
      "epoch": 0.9914529914529915,
      "grad_norm": 16.501556013072644,
      "learning_rate": 9.391929982195233e-06,
      "loss": 0.0641,
      "step": 29
    },
    {
      "epoch": 1.0,
      "grad_norm": 16.501556013072644,
      "learning_rate": 9.339126333065008e-06,
      "loss": 0.0179,
      "step": 30
    },
    {
      "epoch": 1.0341880341880343,
      "grad_norm": 10.007135158066854,
      "learning_rate": 9.284285880837947e-06,
      "loss": 0.0725,
      "step": 31
    },
    {
      "epoch": 1.0683760683760684,
      "grad_norm": 4.196446854828196,
      "learning_rate": 9.22743436783966e-06,
      "loss": 0.0495,
      "step": 32
    },
    {
      "epoch": 1.1025641025641026,
      "grad_norm": 4.548096237677968,
      "learning_rate": 9.168598480395653e-06,
      "loss": 0.0403,
      "step": 33
    },
    {
      "epoch": 1.1367521367521367,
      "grad_norm": 10.203571292360358,
      "learning_rate": 9.107805836304658e-06,
      "loss": 0.0786,
      "step": 34
    },
    {
      "epoch": 1.170940170940171,
      "grad_norm": 12.66007143709808,
      "learning_rate": 9.045084971874738e-06,
      "loss": 0.1094,
      "step": 35
    },
    {
      "epoch": 1.205128205128205,
      "grad_norm": 12.611631213450272,
      "learning_rate": 8.98046532852822e-06,
      "loss": 0.0676,
      "step": 36
    },
    {
      "epoch": 1.2393162393162394,
      "grad_norm": 13.35830031699019,
      "learning_rate": 8.91397723898178e-06,
      "loss": 0.0393,
      "step": 37
    },
    {
      "epoch": 1.2735042735042734,
      "grad_norm": 8.690400816793332,
      "learning_rate": 8.845651913008145e-06,
      "loss": 0.065,
      "step": 38
    },
    {
      "epoch": 1.3076923076923077,
      "grad_norm": 14.615618176894753,
      "learning_rate": 8.775521422786104e-06,
      "loss": 0.0212,
      "step": 39
    },
    {
      "epoch": 1.341880341880342,
      "grad_norm": 2.1336317122196418,
      "learning_rate": 8.703618687845697e-06,
      "loss": 0.0288,
      "step": 40
    },
    {
      "epoch": 1.376068376068376,
      "grad_norm": 4.140427071604391,
      "learning_rate": 8.629977459615655e-06,
      "loss": 0.0432,
      "step": 41
    },
    {
      "epoch": 1.4102564102564101,
      "grad_norm": 5.22920788836053,
      "learning_rate": 8.554632305580355e-06,
      "loss": 0.0383,
      "step": 42
    },
    {
      "epoch": 1.4444444444444444,
      "grad_norm": 1.707299197465682,
      "learning_rate": 8.477618593053693e-06,
      "loss": 0.0264,
      "step": 43
    },
    {
      "epoch": 1.4786324786324787,
      "grad_norm": 5.940268523708764,
      "learning_rate": 8.39897247257754e-06,
      "loss": 0.0236,
      "step": 44
    },
    {
      "epoch": 1.5128205128205128,
      "grad_norm": 3.603359880690343,
      "learning_rate": 8.318730860952523e-06,
      "loss": 0.045,
      "step": 45
    },
    {
      "epoch": 1.547008547008547,
      "grad_norm": 5.184468932618174,
      "learning_rate": 8.23693142390914e-06,
      "loss": 0.0181,
      "step": 46
    },
    {
      "epoch": 1.5811965811965814,
      "grad_norm": 4.729640403035601,
      "learning_rate": 8.153612558427311e-06,
      "loss": 0.0245,
      "step": 47
    },
    {
      "epoch": 1.6153846153846154,
      "grad_norm": 1.4004395210335507,
      "learning_rate": 8.068813374712689e-06,
      "loss": 0.032,
      "step": 48
    },
    {
      "epoch": 1.6495726495726495,
      "grad_norm": 5.483564379255356,
      "learning_rate": 7.982573677838172e-06,
      "loss": 0.0391,
      "step": 49
    },
    {
      "epoch": 1.6837606837606838,
      "grad_norm": 12.48720699386242,
      "learning_rate": 7.894933949059245e-06,
      "loss": 0.0305,
      "step": 50
    },
    {
      "epoch": 1.717948717948718,
      "grad_norm": 11.67481825168841,
      "learning_rate": 7.805935326811913e-06,
      "loss": 0.0643,
      "step": 51
    },
    {
      "epoch": 1.7521367521367521,
      "grad_norm": 8.31052833795307,
      "learning_rate": 7.715619587402165e-06,
      "loss": 0.0325,
      "step": 52
    },
    {
      "epoch": 1.7863247863247862,
      "grad_norm": 3.45860780579628,
      "learning_rate": 7.624029125396004e-06,
      "loss": 0.0213,
      "step": 53
    },
    {
      "epoch": 1.8205128205128205,
      "grad_norm": 3.3328870728402125,
      "learning_rate": 7.53120693371927e-06,
      "loss": 0.003,
      "step": 54
    },
    {
      "epoch": 1.8547008547008548,
      "grad_norm": 1.0551381306202823,
      "learning_rate": 7.437196583476597e-06,
      "loss": 0.0316,
      "step": 55
    },
    {
      "epoch": 1.8888888888888888,
      "grad_norm": 4.940796522342355,
      "learning_rate": 7.342042203498952e-06,
      "loss": 0.0283,
      "step": 56
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 1.3838725683764157,
      "learning_rate": 7.245788459629397e-06,
      "loss": 0.006,
      "step": 57
    },
    {
      "epoch": 1.9572649572649574,
      "grad_norm": 1.105772730767765,
      "learning_rate": 7.148480533756759e-06,
      "loss": 0.0196,
      "step": 58
    },
    {
      "epoch": 1.9914529914529915,
      "grad_norm": 3.4472527456227082,
      "learning_rate": 7.050164102607081e-06,
      "loss": 0.0189,
      "step": 59
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.4472527456227082,
      "learning_rate": 6.950885316302773e-06,
      "loss": 0.0017,
      "step": 60
    },
    {
      "epoch": 2.034188034188034,
      "grad_norm": 2.5594058854187103,
      "learning_rate": 6.850690776699574e-06,
      "loss": 0.0294,
      "step": 61
    },
    {
      "epoch": 2.0683760683760686,
      "grad_norm": 2.38316198568798,
      "learning_rate": 6.749627515511443e-06,
      "loss": 0.0197,
      "step": 62
    },
    {
      "epoch": 2.1025641025641026,
      "grad_norm": 3.9379010709797893,
      "learning_rate": 6.647742972233703e-06,
      "loss": 0.0296,
      "step": 63
    },
    {
      "epoch": 2.1367521367521367,
      "grad_norm": 3.4756056396321586,
      "learning_rate": 6.545084971874738e-06,
      "loss": 0.0167,
      "step": 64
    },
    {
      "epoch": 2.1709401709401708,
      "grad_norm": 25.199221603893488,
      "learning_rate": 6.441701702506755e-06,
      "loss": 0.0614,
      "step": 65
    },
    {
      "epoch": 2.2051282051282053,
      "grad_norm": 6.642877984563196,
      "learning_rate": 6.337641692646106e-06,
      "loss": 0.0214,
      "step": 66
    },
    {
      "epoch": 2.2393162393162394,
      "grad_norm": 2.797157842559016,
      "learning_rate": 6.2329537884738115e-06,
      "loss": 0.0104,
      "step": 67
    },
    {
      "epoch": 2.2735042735042734,
      "grad_norm": 4.3025299601872256,
      "learning_rate": 6.127687130906972e-06,
      "loss": 0.0232,
      "step": 68
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 11.127364863419961,
      "learning_rate": 6.021891132531825e-06,
      "loss": 0.0129,
      "step": 69
    },
    {
      "epoch": 2.341880341880342,
      "grad_norm": 2.6327078429367723,
      "learning_rate": 5.915615454409281e-06,
      "loss": 0.02,
      "step": 70
    },
    {
      "epoch": 2.376068376068376,
      "grad_norm": 6.642980085247562,
      "learning_rate": 5.808909982763825e-06,
      "loss": 0.001,
      "step": 71
    },
    {
      "epoch": 2.41025641025641,
      "grad_norm": 64.72672342279493,
      "learning_rate": 5.701824805566722e-06,
      "loss": 0.0466,
      "step": 72
    },
    {
      "epoch": 2.4444444444444446,
      "grad_norm": 7.531014554064458,
      "learning_rate": 5.594410189024533e-06,
      "loss": 0.0019,
      "step": 73
    },
    {
      "epoch": 2.4786324786324787,
      "grad_norm": 0.9212587754890356,
      "learning_rate": 5.4867165539839505e-06,
      "loss": 0.01,
      "step": 74
    },
    {
      "epoch": 2.5128205128205128,
      "grad_norm": 5.3676628920085125,
      "learning_rate": 5.378794452264053e-06,
      "loss": 0.0702,
      "step": 75
    },
    {
      "epoch": 2.547008547008547,
      "grad_norm": 8.604201249574777,
      "learning_rate": 5.270694542927089e-06,
      "loss": 0.0078,
      "step": 76
    },
    {
      "epoch": 2.5811965811965814,
      "grad_norm": 3.442392132103759,
      "learning_rate": 5.1624675684989035e-06,
      "loss": 0.0099,
      "step": 77
    },
    {
      "epoch": 2.6153846153846154,
      "grad_norm": 1.226217039772073,
      "learning_rate": 5.054164331150199e-06,
      "loss": 0.0037,
      "step": 78
    },
    {
      "epoch": 2.6495726495726495,
      "grad_norm": 8.349284917924178,
      "learning_rate": 4.945835668849801e-06,
      "loss": 0.0486,
      "step": 79
    },
    {
      "epoch": 2.683760683760684,
      "grad_norm": 13.983929022139195,
      "learning_rate": 4.837532431501098e-06,
      "loss": 0.08,
      "step": 80
    },
    {
      "epoch": 2.717948717948718,
      "grad_norm": 7.290271590657317,
      "learning_rate": 4.729305457072913e-06,
      "loss": 0.0113,
      "step": 81
    },
    {
      "epoch": 2.752136752136752,
      "grad_norm": 5.948542359802803,
      "learning_rate": 4.621205547735949e-06,
      "loss": 0.0308,
      "step": 82
    },
    {
      "epoch": 2.786324786324786,
      "grad_norm": 1.7311032900556051,
      "learning_rate": 4.513283446016052e-06,
      "loss": 0.0176,
      "step": 83
    },
    {
      "epoch": 2.8205128205128203,
      "grad_norm": 2.919516222473814,
      "learning_rate": 4.4055898109754684e-06,
      "loss": 0.0049,
      "step": 84
    },
    {
      "epoch": 2.8547008547008548,
      "grad_norm": 1.9073912741858117,
      "learning_rate": 4.298175194433279e-06,
      "loss": 0.009,
      "step": 85
    },
    {
      "epoch": 2.888888888888889,
      "grad_norm": 3.474022268526306,
      "learning_rate": 4.191090017236177e-06,
      "loss": 0.0154,
      "step": 86
    },
    {
      "epoch": 2.9230769230769234,
      "grad_norm": 5.209234490321998,
      "learning_rate": 4.0843845455907195e-06,
      "loss": 0.0179,
      "step": 87
    },
    {
      "epoch": 2.9572649572649574,
      "grad_norm": 5.172963112227645,
      "learning_rate": 3.9781088674681764e-06,
      "loss": 0.0155,
      "step": 88
    },
    {
      "epoch": 2.9914529914529915,
      "grad_norm": 3.346150099815718,
      "learning_rate": 3.87231286909303e-06,
      "loss": 0.0194,
      "step": 89
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.346150099815718,
      "learning_rate": 3.767046211526191e-06,
      "loss": 0.0016,
      "step": 90
    },
    {
      "epoch": 3.034188034188034,
      "grad_norm": 3.628053542950287,
      "learning_rate": 3.662358307353897e-06,
      "loss": 0.0115,
      "step": 91
    },
    {
      "epoch": 3.0683760683760686,
      "grad_norm": 2.4035577316004533,
      "learning_rate": 3.5582982974932467e-06,
      "loss": 0.0122,
      "step": 92
    },
    {
      "epoch": 3.1025641025641026,
      "grad_norm": 2.0115150221716145,
      "learning_rate": 3.4549150281252635e-06,
      "loss": 0.0065,
      "step": 93
    },
    {
      "epoch": 3.1367521367521367,
      "grad_norm": 1.7423751005899801,
      "learning_rate": 3.3522570277662986e-06,
      "loss": 0.0053,
      "step": 94
    },
    {
      "epoch": 3.1709401709401708,
      "grad_norm": 1.2120823372902632,
      "learning_rate": 3.250372484488558e-06,
      "loss": 0.0325,
      "step": 95
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 4.5742221694331535,
      "learning_rate": 3.149309223300428e-06,
      "loss": 0.0107,
      "step": 96
    },
    {
      "epoch": 3.2393162393162394,
      "grad_norm": 5.680204394389736,
      "learning_rate": 3.0491146836972273e-06,
      "loss": 0.0129,
      "step": 97
    },
    {
      "epoch": 3.2735042735042734,
      "grad_norm": 2.7770967720336697,
      "learning_rate": 2.9498358973929197e-06,
      "loss": 0.0051,
      "step": 98
    },
    {
      "epoch": 3.3076923076923075,
      "grad_norm": 2.8490979218142036,
      "learning_rate": 2.8515194662432423e-06,
      "loss": 0.0163,
      "step": 99
    },
    {
      "epoch": 3.341880341880342,
      "grad_norm": 2.131593020337306,
      "learning_rate": 2.7542115403706067e-06,
      "loss": 0.0193,
      "step": 100
    },
    {
      "epoch": 3.376068376068376,
      "grad_norm": 3.0496672724782465,
      "learning_rate": 2.65795779650105e-06,
      "loss": 0.007,
      "step": 101
    },
    {
      "epoch": 3.41025641025641,
      "grad_norm": 2.4627336519789607,
      "learning_rate": 2.562803416523405e-06,
      "loss": 0.0564,
      "step": 102
    },
    {
      "epoch": 3.4444444444444446,
      "grad_norm": 4.227140605397066,
      "learning_rate": 2.46879306628073e-06,
      "loss": 0.029,
      "step": 103
    },
    {
      "epoch": 3.4786324786324787,
      "grad_norm": 12.74938404068687,
      "learning_rate": 2.375970874603998e-06,
      "loss": 0.004,
      "step": 104
    },
    {
      "epoch": 3.5128205128205128,
      "grad_norm": 1.2426442297293803,
      "learning_rate": 2.2843804125978356e-06,
      "loss": 0.0018,
      "step": 105
    },
    {
      "epoch": 3.547008547008547,
      "grad_norm": 0.49342076095930326,
      "learning_rate": 2.1940646731880887e-06,
      "loss": 0.0039,
      "step": 106
    },
    {
      "epoch": 3.5811965811965814,
      "grad_norm": 2.69930114004212,
      "learning_rate": 2.105066050940758e-06,
      "loss": 0.0052,
      "step": 107
    },
    {
      "epoch": 3.6153846153846154,
      "grad_norm": 0.6214273697117633,
      "learning_rate": 2.0174263221618307e-06,
      "loss": 0.0042,
      "step": 108
    },
    {
      "epoch": 3.6495726495726495,
      "grad_norm": 2.182802172826988,
      "learning_rate": 1.931186625287313e-06,
      "loss": 0.0034,
      "step": 109
    },
    {
      "epoch": 3.683760683760684,
      "grad_norm": 1.2502779472369923,
      "learning_rate": 1.8463874415726918e-06,
      "loss": 0.0057,
      "step": 110
    },
    {
      "epoch": 3.717948717948718,
      "grad_norm": 1.6497554269207777,
      "learning_rate": 1.7630685760908623e-06,
      "loss": 0.0043,
      "step": 111
    },
    {
      "epoch": 3.752136752136752,
      "grad_norm": 1.004233298802305,
      "learning_rate": 1.6812691390474788e-06,
      "loss": 0.0008,
      "step": 112
    },
    {
      "epoch": 3.786324786324786,
      "grad_norm": 3.151655137691081,
      "learning_rate": 1.6010275274224607e-06,
      "loss": 0.0146,
      "step": 113
    },
    {
      "epoch": 3.8205128205128203,
      "grad_norm": 2.663608462515639,
      "learning_rate": 1.5223814069463077e-06,
      "loss": 0.0006,
      "step": 114
    },
    {
      "epoch": 3.8547008547008548,
      "grad_norm": 2.4166544278661175,
      "learning_rate": 1.4453676944196477e-06,
      "loss": 0.0062,
      "step": 115
    },
    {
      "epoch": 3.888888888888889,
      "grad_norm": 0.42828282447090554,
      "learning_rate": 1.370022540384347e-06,
      "loss": 0.002,
      "step": 116
    },
    {
      "epoch": 3.9230769230769234,
      "grad_norm": 0.5035353786016956,
      "learning_rate": 1.296381312154305e-06,
      "loss": 0.0016,
      "step": 117
    },
    {
      "epoch": 3.9572649572649574,
      "grad_norm": 0.8145640782367408,
      "learning_rate": 1.2244785772138972e-06,
      "loss": 0.0019,
      "step": 118
    },
    {
      "epoch": 3.9914529914529915,
      "grad_norm": 0.6555442820439389,
      "learning_rate": 1.1543480869918555e-06,
      "loss": 0.0025,
      "step": 119
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.1211572517989323,
      "learning_rate": 1.0860227610182222e-06,
      "loss": 0.0016,
      "step": 120
    },
    {
      "epoch": 4.034188034188034,
      "grad_norm": 2.837751985589557,
      "learning_rate": 1.0195346714717813e-06,
      "loss": 0.0059,
      "step": 121
    },
    {
      "epoch": 4.068376068376068,
      "grad_norm": 9.72793521179094,
      "learning_rate": 9.549150281252633e-07,
      "loss": 0.0521,
      "step": 122
    },
    {
      "epoch": 4.102564102564102,
      "grad_norm": 0.40180119914672424,
      "learning_rate": 8.921941636953435e-07,
      "loss": 0.0008,
      "step": 123
    },
    {
      "epoch": 4.136752136752137,
      "grad_norm": 0.38580788543923267,
      "learning_rate": 8.314015196043501e-07,
      "loss": 0.0009,
      "step": 124
    },
    {
      "epoch": 4.170940170940171,
      "grad_norm": 4.7944314874118374,
      "learning_rate": 7.725656321603414e-07,
      "loss": 0.0047,
      "step": 125
    },
    {
      "epoch": 4.205128205128205,
      "grad_norm": 1.380709153215471,
      "learning_rate": 7.157141191620548e-07,
      "loss": 0.0022,
      "step": 126
    },
    {
      "epoch": 4.239316239316239,
      "grad_norm": 0.09055290150130697,
      "learning_rate": 6.60873666934993e-07,
      "loss": 0.0004,
      "step": 127
    },
    {
      "epoch": 4.273504273504273,
      "grad_norm": 0.26721903473765835,
      "learning_rate": 6.080700178047688e-07,
      "loss": 0.001,
      "step": 128
    },
    {
      "epoch": 4.3076923076923075,
      "grad_norm": 4.427205308427123,
      "learning_rate": 5.573279580135438e-07,
      "loss": 0.0307,
      "step": 129
    },
    {
      "epoch": 4.3418803418803416,
      "grad_norm": 3.7731219517331533,
      "learning_rate": 5.086713060852788e-07,
      "loss": 0.0196,
      "step": 130
    },
    {
      "epoch": 4.3760683760683765,
      "grad_norm": 0.3206150214839682,
      "learning_rate": 4.6212290164521554e-07,
      "loss": 0.0014,
      "step": 131
    },
    {
      "epoch": 4.410256410256411,
      "grad_norm": 4.3532485442057745,
      "learning_rate": 4.1770459469887003e-07,
      "loss": 0.0309,
      "step": 132
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 0.5802295362163845,
      "learning_rate": 3.754372353755559e-07,
      "loss": 0.0023,
      "step": 133
    },
    {
      "epoch": 4.478632478632479,
      "grad_norm": 2.568497803610388,
      "learning_rate": 3.35340664141246e-07,
      "loss": 0.0168,
      "step": 134
    },
    {
      "epoch": 4.512820512820513,
      "grad_norm": 1.1779499885511535,
      "learning_rate": 2.974337024853802e-07,
      "loss": 0.0022,
      "step": 135
    },
    {
      "epoch": 4.547008547008547,
      "grad_norm": 1.3941782756718661,
      "learning_rate": 2.617341440859883e-07,
      "loss": 0.0026,
      "step": 136
    },
    {
      "epoch": 4.581196581196581,
      "grad_norm": 1.6601906514570437,
      "learning_rate": 2.2825874645725942e-07,
      "loss": 0.0017,
      "step": 137
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 0.383326235850294,
      "learning_rate": 1.9702322308350675e-07,
      "loss": 0.0015,
      "step": 138
    },
    {
      "epoch": 4.64957264957265,
      "grad_norm": 2.401959029947897,
      "learning_rate": 1.6804223604318825e-07,
      "loss": 0.0067,
      "step": 139
    },
    {
      "epoch": 4.683760683760684,
      "grad_norm": 0.9855732491355061,
      "learning_rate": 1.413293891264722e-07,
      "loss": 0.0036,
      "step": 140
    },
    {
      "epoch": 4.717948717948718,
      "grad_norm": 0.12231214645102874,
      "learning_rate": 1.1689722144956672e-07,
      "loss": 0.0005,
      "step": 141
    },
    {
      "epoch": 4.752136752136752,
      "grad_norm": 2.5878205380346957,
      "learning_rate": 9.475720156880419e-08,
      "loss": 0.0107,
      "step": 142
    },
    {
      "epoch": 4.786324786324786,
      "grad_norm": 0.5782321688283493,
      "learning_rate": 7.491972209725807e-08,
      "loss": 0.0023,
      "step": 143
    },
    {
      "epoch": 4.82051282051282,
      "grad_norm": 0.15708765591840856,
      "learning_rate": 5.739409482640956e-08,
      "loss": 0.0006,
      "step": 144
    },
    {
      "epoch": 4.854700854700854,
      "grad_norm": 0.883976824477256,
      "learning_rate": 4.2188546355153016e-08,
      "loss": 0.0018,
      "step": 145
    },
    {
      "epoch": 4.888888888888889,
      "grad_norm": 5.372037807057783,
      "learning_rate": 2.9310214228202016e-08,
      "loss": 0.0142,
      "step": 146
    },
    {
      "epoch": 4.923076923076923,
      "grad_norm": 1.1060803400734966,
      "learning_rate": 1.8765143585693924e-08,
      "loss": 0.0025,
      "step": 147
    },
    {
      "epoch": 4.957264957264957,
      "grad_norm": 0.40249495145373326,
      "learning_rate": 1.0558284325578038e-08,
      "loss": 0.0013,
      "step": 148
    },
    {
      "epoch": 4.9914529914529915,
      "grad_norm": 0.10203486906721038,
      "learning_rate": 4.69348878011644e-09,
      "loss": 0.0005,
      "step": 149
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 149,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 29626003253248.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}