| { | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 2.992, | |
| "eval_steps": 400, | |
| "global_step": 936, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.0064, | |
| "grad_norm": 0.23147642612457275, | |
| "learning_rate": 0.001997860962566845, | |
| "loss": 29.0583, | |
| "step": 2 | |
| }, | |
| { | |
| "epoch": 0.0128, | |
| "grad_norm": 1.181070327758789, | |
| "learning_rate": 0.001993582887700535, | |
| "loss": 28.9384, | |
| "step": 4 | |
| }, | |
| { | |
| "epoch": 0.0192, | |
| "grad_norm": 1.9193989038467407, | |
| "learning_rate": 0.0019893048128342247, | |
| "loss": 28.118, | |
| "step": 6 | |
| }, | |
| { | |
| "epoch": 0.0256, | |
| "grad_norm": 4.55759334564209, | |
| "learning_rate": 0.0019850267379679146, | |
| "loss": 28.2661, | |
| "step": 8 | |
| }, | |
| { | |
| "epoch": 0.032, | |
| "grad_norm": 6.3390374183654785, | |
| "learning_rate": 0.0019807486631016045, | |
| "loss": 27.4742, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.0384, | |
| "grad_norm": 4.0472869873046875, | |
| "learning_rate": 0.0019764705882352944, | |
| "loss": 27.5481, | |
| "step": 12 | |
| }, | |
| { | |
| "epoch": 0.0448, | |
| "grad_norm": null, | |
| "learning_rate": 0.0019764705882352944, | |
| "loss": 691.2838, | |
| "step": 14 | |
| }, | |
| { | |
| "epoch": 0.0512, | |
| "grad_norm": 596.6735229492188, | |
| "learning_rate": 0.001972192513368984, | |
| "loss": 614.0375, | |
| "step": 16 | |
| }, | |
| { | |
| "epoch": 0.0576, | |
| "grad_norm": 374.73675537109375, | |
| "learning_rate": 0.001967914438502674, | |
| "loss": 234.3992, | |
| "step": 18 | |
| }, | |
| { | |
| "epoch": 0.064, | |
| "grad_norm": 28.9688777923584, | |
| "learning_rate": 0.0019636363636363636, | |
| "loss": 41.9122, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.0704, | |
| "grad_norm": 169.22000122070312, | |
| "learning_rate": 0.0019593582887700535, | |
| "loss": 41.3339, | |
| "step": 22 | |
| }, | |
| { | |
| "epoch": 0.0768, | |
| "grad_norm": 12.487041473388672, | |
| "learning_rate": 0.0019550802139037433, | |
| "loss": 30.189, | |
| "step": 24 | |
| }, | |
| { | |
| "epoch": 0.0832, | |
| "grad_norm": 8.636120796203613, | |
| "learning_rate": 0.001950802139037433, | |
| "loss": 27.9389, | |
| "step": 26 | |
| }, | |
| { | |
| "epoch": 0.0896, | |
| "grad_norm": 4.744749069213867, | |
| "learning_rate": 0.001946524064171123, | |
| "loss": 27.1649, | |
| "step": 28 | |
| }, | |
| { | |
| "epoch": 0.096, | |
| "grad_norm": 7.722327709197998, | |
| "learning_rate": 0.0019422459893048128, | |
| "loss": 27.3547, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.1024, | |
| "grad_norm": 9.445578575134277, | |
| "learning_rate": 0.0019379679144385026, | |
| "loss": 26.9296, | |
| "step": 32 | |
| }, | |
| { | |
| "epoch": 0.1088, | |
| "grad_norm": 6.174092769622803, | |
| "learning_rate": 0.0019336898395721925, | |
| "loss": 27.1266, | |
| "step": 34 | |
| }, | |
| { | |
| "epoch": 0.1152, | |
| "grad_norm": 356.22625732421875, | |
| "learning_rate": 0.0019294117647058824, | |
| "loss": 45.6243, | |
| "step": 36 | |
| }, | |
| { | |
| "epoch": 0.1216, | |
| "grad_norm": 23.33014488220215, | |
| "learning_rate": 0.001925133689839572, | |
| "loss": 32.972, | |
| "step": 38 | |
| }, | |
| { | |
| "epoch": 0.128, | |
| "grad_norm": 10.953542709350586, | |
| "learning_rate": 0.0019208556149732622, | |
| "loss": 27.1236, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.1344, | |
| "grad_norm": 6.473776340484619, | |
| "learning_rate": 0.0019165775401069518, | |
| "loss": 26.9502, | |
| "step": 42 | |
| }, | |
| { | |
| "epoch": 0.1408, | |
| "grad_norm": null, | |
| "learning_rate": 0.001914438502673797, | |
| "loss": 44.8032, | |
| "step": 44 | |
| }, | |
| { | |
| "epoch": 0.1472, | |
| "grad_norm": 182.4479217529297, | |
| "learning_rate": 0.0019101604278074866, | |
| "loss": 56.3821, | |
| "step": 46 | |
| }, | |
| { | |
| "epoch": 0.1536, | |
| "grad_norm": 24.16608238220215, | |
| "learning_rate": 0.0019058823529411763, | |
| "loss": 31.7551, | |
| "step": 48 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "grad_norm": 13.420609474182129, | |
| "learning_rate": 0.0019016042780748664, | |
| "loss": 26.8265, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.1664, | |
| "grad_norm": 6.592583179473877, | |
| "learning_rate": 0.001897326203208556, | |
| "loss": 26.9479, | |
| "step": 52 | |
| }, | |
| { | |
| "epoch": 0.1728, | |
| "grad_norm": 18.330659866333008, | |
| "learning_rate": 0.001893048128342246, | |
| "loss": 26.9247, | |
| "step": 54 | |
| }, | |
| { | |
| "epoch": 0.1792, | |
| "grad_norm": 10.113327980041504, | |
| "learning_rate": 0.0018887700534759358, | |
| "loss": 26.654, | |
| "step": 56 | |
| }, | |
| { | |
| "epoch": 0.1856, | |
| "grad_norm": null, | |
| "learning_rate": 0.0018866310160427808, | |
| "loss": 158.5872, | |
| "step": 58 | |
| }, | |
| { | |
| "epoch": 0.192, | |
| "grad_norm": 714.9314575195312, | |
| "learning_rate": 0.0018823529411764706, | |
| "loss": 245.1731, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.1984, | |
| "grad_norm": 20.074459075927734, | |
| "learning_rate": 0.0018780748663101605, | |
| "loss": 54.0979, | |
| "step": 62 | |
| }, | |
| { | |
| "epoch": 0.2048, | |
| "grad_norm": 6.04124641418457, | |
| "learning_rate": 0.0018737967914438502, | |
| "loss": 26.655, | |
| "step": 64 | |
| }, | |
| { | |
| "epoch": 0.2112, | |
| "grad_norm": 4.1345906257629395, | |
| "learning_rate": 0.0018695187165775403, | |
| "loss": 26.5224, | |
| "step": 66 | |
| }, | |
| { | |
| "epoch": 0.2176, | |
| "grad_norm": 5.253076553344727, | |
| "learning_rate": 0.00186524064171123, | |
| "loss": 26.2523, | |
| "step": 68 | |
| }, | |
| { | |
| "epoch": 0.224, | |
| "grad_norm": 4.040248394012451, | |
| "learning_rate": 0.00186096256684492, | |
| "loss": 26.5548, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.2304, | |
| "grad_norm": 4.1401262283325195, | |
| "learning_rate": 0.0018566844919786097, | |
| "loss": 26.3328, | |
| "step": 72 | |
| }, | |
| { | |
| "epoch": 0.2368, | |
| "grad_norm": 4.546834945678711, | |
| "learning_rate": 0.0018524064171122994, | |
| "loss": 26.1653, | |
| "step": 74 | |
| }, | |
| { | |
| "epoch": 0.2432, | |
| "grad_norm": 3.73606276512146, | |
| "learning_rate": 0.0018481283422459895, | |
| "loss": 26.4023, | |
| "step": 76 | |
| }, | |
| { | |
| "epoch": 0.2496, | |
| "grad_norm": 3.841369390487671, | |
| "learning_rate": 0.0018438502673796791, | |
| "loss": 26.2402, | |
| "step": 78 | |
| }, | |
| { | |
| "epoch": 0.256, | |
| "grad_norm": 3.3648502826690674, | |
| "learning_rate": 0.001839572192513369, | |
| "loss": 26.143, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.2624, | |
| "grad_norm": 3.7182576656341553, | |
| "learning_rate": 0.0018352941176470589, | |
| "loss": 26.5052, | |
| "step": 82 | |
| }, | |
| { | |
| "epoch": 0.2688, | |
| "grad_norm": 3.4634881019592285, | |
| "learning_rate": 0.0018310160427807488, | |
| "loss": 26.2055, | |
| "step": 84 | |
| }, | |
| { | |
| "epoch": 0.2752, | |
| "grad_norm": 3.5876669883728027, | |
| "learning_rate": 0.0018267379679144384, | |
| "loss": 26.3232, | |
| "step": 86 | |
| }, | |
| { | |
| "epoch": 0.2816, | |
| "grad_norm": 3.0892839431762695, | |
| "learning_rate": 0.0018224598930481285, | |
| "loss": 26.0274, | |
| "step": 88 | |
| }, | |
| { | |
| "epoch": 0.288, | |
| "grad_norm": 3.4667418003082275, | |
| "learning_rate": 0.0018181818181818182, | |
| "loss": 26.0062, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.2944, | |
| "grad_norm": 3.052298069000244, | |
| "learning_rate": 0.001813903743315508, | |
| "loss": 26.2114, | |
| "step": 92 | |
| }, | |
| { | |
| "epoch": 0.3008, | |
| "grad_norm": 2.7124950885772705, | |
| "learning_rate": 0.001809625668449198, | |
| "loss": 25.9583, | |
| "step": 94 | |
| }, | |
| { | |
| "epoch": 0.3072, | |
| "grad_norm": 2.713899612426758, | |
| "learning_rate": 0.0018053475935828878, | |
| "loss": 26.0525, | |
| "step": 96 | |
| }, | |
| { | |
| "epoch": 0.3136, | |
| "grad_norm": 2.6182234287261963, | |
| "learning_rate": 0.0018010695187165775, | |
| "loss": 25.4642, | |
| "step": 98 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "grad_norm": 2.3585636615753174, | |
| "learning_rate": 0.0017967914438502676, | |
| "loss": 26.0951, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.3264, | |
| "grad_norm": 2.5994300842285156, | |
| "learning_rate": 0.0017925133689839572, | |
| "loss": 26.1542, | |
| "step": 102 | |
| }, | |
| { | |
| "epoch": 0.3328, | |
| "grad_norm": 2.327500581741333, | |
| "learning_rate": 0.0017882352941176471, | |
| "loss": 26.0515, | |
| "step": 104 | |
| }, | |
| { | |
| "epoch": 0.3392, | |
| "grad_norm": 2.2460238933563232, | |
| "learning_rate": 0.001783957219251337, | |
| "loss": 25.722, | |
| "step": 106 | |
| }, | |
| { | |
| "epoch": 0.3456, | |
| "grad_norm": 2.0208640098571777, | |
| "learning_rate": 0.0017796791443850269, | |
| "loss": 25.8647, | |
| "step": 108 | |
| }, | |
| { | |
| "epoch": 0.352, | |
| "grad_norm": 2.1272950172424316, | |
| "learning_rate": 0.0017754010695187165, | |
| "loss": 25.8338, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.3584, | |
| "grad_norm": 1.743446946144104, | |
| "learning_rate": 0.0017711229946524066, | |
| "loss": 25.6917, | |
| "step": 112 | |
| }, | |
| { | |
| "epoch": 0.3648, | |
| "grad_norm": 1.884177565574646, | |
| "learning_rate": 0.0017668449197860963, | |
| "loss": 25.492, | |
| "step": 114 | |
| }, | |
| { | |
| "epoch": 0.3712, | |
| "grad_norm": 1.5283397436141968, | |
| "learning_rate": 0.001762566844919786, | |
| "loss": 25.7483, | |
| "step": 116 | |
| }, | |
| { | |
| "epoch": 0.3776, | |
| "grad_norm": 1.838241457939148, | |
| "learning_rate": 0.001758288770053476, | |
| "loss": 26.1345, | |
| "step": 118 | |
| }, | |
| { | |
| "epoch": 0.384, | |
| "grad_norm": 1.7873716354370117, | |
| "learning_rate": 0.0017540106951871657, | |
| "loss": 25.5023, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.3904, | |
| "grad_norm": 1.707553505897522, | |
| "learning_rate": 0.0017497326203208556, | |
| "loss": 25.6723, | |
| "step": 122 | |
| }, | |
| { | |
| "epoch": 0.3968, | |
| "grad_norm": 1.5225554704666138, | |
| "learning_rate": 0.0017454545454545455, | |
| "loss": 25.4451, | |
| "step": 124 | |
| }, | |
| { | |
| "epoch": 0.4032, | |
| "grad_norm": 1.5387077331542969, | |
| "learning_rate": 0.0017411764705882354, | |
| "loss": 24.9427, | |
| "step": 126 | |
| }, | |
| { | |
| "epoch": 0.4096, | |
| "grad_norm": 1.4218844175338745, | |
| "learning_rate": 0.001736898395721925, | |
| "loss": 25.2159, | |
| "step": 128 | |
| }, | |
| { | |
| "epoch": 0.416, | |
| "grad_norm": 1.4706506729125977, | |
| "learning_rate": 0.0017326203208556151, | |
| "loss": 25.5617, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.4224, | |
| "grad_norm": 1.6522170305252075, | |
| "learning_rate": 0.0017283422459893048, | |
| "loss": 25.4537, | |
| "step": 132 | |
| }, | |
| { | |
| "epoch": 0.4288, | |
| "grad_norm": 1.5547223091125488, | |
| "learning_rate": 0.0017240641711229947, | |
| "loss": 25.126, | |
| "step": 134 | |
| }, | |
| { | |
| "epoch": 0.4352, | |
| "grad_norm": 1.2542740106582642, | |
| "learning_rate": 0.0017197860962566845, | |
| "loss": 25.3198, | |
| "step": 136 | |
| }, | |
| { | |
| "epoch": 0.4416, | |
| "grad_norm": 1.3993158340454102, | |
| "learning_rate": 0.0017155080213903744, | |
| "loss": 25.4372, | |
| "step": 138 | |
| }, | |
| { | |
| "epoch": 0.448, | |
| "grad_norm": 1.3659427165985107, | |
| "learning_rate": 0.001711229946524064, | |
| "loss": 25.0459, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.4544, | |
| "grad_norm": 1.124814748764038, | |
| "learning_rate": 0.0017069518716577542, | |
| "loss": 25.1648, | |
| "step": 142 | |
| }, | |
| { | |
| "epoch": 0.4608, | |
| "grad_norm": 1.0325303077697754, | |
| "learning_rate": 0.0017026737967914438, | |
| "loss": 25.4647, | |
| "step": 144 | |
| }, | |
| { | |
| "epoch": 0.4672, | |
| "grad_norm": 1.2640491724014282, | |
| "learning_rate": 0.0016983957219251337, | |
| "loss": 25.0149, | |
| "step": 146 | |
| }, | |
| { | |
| "epoch": 0.4736, | |
| "grad_norm": 1.0942180156707764, | |
| "learning_rate": 0.0016941176470588236, | |
| "loss": 25.0814, | |
| "step": 148 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "grad_norm": 1.2712141275405884, | |
| "learning_rate": 0.0016898395721925135, | |
| "loss": 25.0341, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.4864, | |
| "grad_norm": 1.2034132480621338, | |
| "learning_rate": 0.0016855614973262031, | |
| "loss": 25.0647, | |
| "step": 152 | |
| }, | |
| { | |
| "epoch": 0.4928, | |
| "grad_norm": 1.2348299026489258, | |
| "learning_rate": 0.0016812834224598932, | |
| "loss": 24.7844, | |
| "step": 154 | |
| }, | |
| { | |
| "epoch": 0.4992, | |
| "grad_norm": 1.691644549369812, | |
| "learning_rate": 0.001677005347593583, | |
| "loss": 24.7689, | |
| "step": 156 | |
| }, | |
| { | |
| "epoch": 0.5056, | |
| "grad_norm": 1.4753830432891846, | |
| "learning_rate": 0.0016727272727272726, | |
| "loss": 24.941, | |
| "step": 158 | |
| }, | |
| { | |
| "epoch": 0.512, | |
| "grad_norm": 0.9928715229034424, | |
| "learning_rate": 0.0016684491978609627, | |
| "loss": 25.0275, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.5184, | |
| "grad_norm": 1.1669100522994995, | |
| "learning_rate": 0.0016641711229946523, | |
| "loss": 24.9014, | |
| "step": 162 | |
| }, | |
| { | |
| "epoch": 0.5248, | |
| "grad_norm": 1.39118230342865, | |
| "learning_rate": 0.0016598930481283422, | |
| "loss": 24.6339, | |
| "step": 164 | |
| }, | |
| { | |
| "epoch": 0.5312, | |
| "grad_norm": 1.2533951997756958, | |
| "learning_rate": 0.001655614973262032, | |
| "loss": 24.6086, | |
| "step": 166 | |
| }, | |
| { | |
| "epoch": 0.5376, | |
| "grad_norm": 0.9174675941467285, | |
| "learning_rate": 0.001651336898395722, | |
| "loss": 25.0262, | |
| "step": 168 | |
| }, | |
| { | |
| "epoch": 0.544, | |
| "grad_norm": 0.9981193542480469, | |
| "learning_rate": 0.0016470588235294116, | |
| "loss": 24.6074, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.5504, | |
| "grad_norm": 1.4740686416625977, | |
| "learning_rate": 0.0016427807486631017, | |
| "loss": 24.5649, | |
| "step": 172 | |
| }, | |
| { | |
| "epoch": 0.5568, | |
| "grad_norm": 1.0988154411315918, | |
| "learning_rate": 0.0016385026737967914, | |
| "loss": 24.2709, | |
| "step": 174 | |
| }, | |
| { | |
| "epoch": 0.5632, | |
| "grad_norm": 1.1431447267532349, | |
| "learning_rate": 0.0016342245989304813, | |
| "loss": 24.7044, | |
| "step": 176 | |
| }, | |
| { | |
| "epoch": 0.5696, | |
| "grad_norm": 1.2967267036437988, | |
| "learning_rate": 0.0016299465240641711, | |
| "loss": 24.803, | |
| "step": 178 | |
| }, | |
| { | |
| "epoch": 0.576, | |
| "grad_norm": 1.1269863843917847, | |
| "learning_rate": 0.001625668449197861, | |
| "loss": 24.4945, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.5824, | |
| "grad_norm": 0.9064795970916748, | |
| "learning_rate": 0.0016213903743315507, | |
| "loss": 24.5421, | |
| "step": 182 | |
| }, | |
| { | |
| "epoch": 0.5888, | |
| "grad_norm": 0.9420700669288635, | |
| "learning_rate": 0.0016171122994652408, | |
| "loss": 24.494, | |
| "step": 184 | |
| }, | |
| { | |
| "epoch": 0.5952, | |
| "grad_norm": 1.1436915397644043, | |
| "learning_rate": 0.0016128342245989304, | |
| "loss": 24.6417, | |
| "step": 186 | |
| }, | |
| { | |
| "epoch": 0.6016, | |
| "grad_norm": 0.9322279691696167, | |
| "learning_rate": 0.0016085561497326205, | |
| "loss": 24.6154, | |
| "step": 188 | |
| }, | |
| { | |
| "epoch": 0.608, | |
| "grad_norm": 1.0101568698883057, | |
| "learning_rate": 0.0016042780748663102, | |
| "loss": 24.4768, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.6144, | |
| "grad_norm": 1.3874669075012207, | |
| "learning_rate": 0.0016, | |
| "loss": 24.6752, | |
| "step": 192 | |
| }, | |
| { | |
| "epoch": 0.6208, | |
| "grad_norm": 1.0779438018798828, | |
| "learning_rate": 0.00159572192513369, | |
| "loss": 24.7572, | |
| "step": 194 | |
| }, | |
| { | |
| "epoch": 0.6272, | |
| "grad_norm": 1.3621560335159302, | |
| "learning_rate": 0.0015914438502673798, | |
| "loss": 24.5775, | |
| "step": 196 | |
| }, | |
| { | |
| "epoch": 0.6336, | |
| "grad_norm": 1.291939616203308, | |
| "learning_rate": 0.0015871657754010695, | |
| "loss": 24.4171, | |
| "step": 198 | |
| }, | |
| { | |
| "epoch": 0.64, | |
| "grad_norm": 19.172954559326172, | |
| "learning_rate": 0.0015828877005347596, | |
| "loss": 30.6558, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.6464, | |
| "grad_norm": 4.52994441986084, | |
| "learning_rate": 0.0015786096256684493, | |
| "loss": 25.1351, | |
| "step": 202 | |
| }, | |
| { | |
| "epoch": 0.6528, | |
| "grad_norm": 3.1266472339630127, | |
| "learning_rate": 0.001574331550802139, | |
| "loss": 24.8543, | |
| "step": 204 | |
| }, | |
| { | |
| "epoch": 0.6592, | |
| "grad_norm": 2.4678471088409424, | |
| "learning_rate": 0.001570053475935829, | |
| "loss": 24.716, | |
| "step": 206 | |
| }, | |
| { | |
| "epoch": 0.6656, | |
| "grad_norm": 3.5065557956695557, | |
| "learning_rate": 0.0015657754010695187, | |
| "loss": 24.6532, | |
| "step": 208 | |
| }, | |
| { | |
| "epoch": 0.672, | |
| "grad_norm": 2.501286745071411, | |
| "learning_rate": 0.0015614973262032086, | |
| "loss": 24.7766, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.6784, | |
| "grad_norm": 1.9249541759490967, | |
| "learning_rate": 0.0015572192513368984, | |
| "loss": 24.2869, | |
| "step": 212 | |
| }, | |
| { | |
| "epoch": 0.6848, | |
| "grad_norm": 3.193232297897339, | |
| "learning_rate": 0.0015529411764705883, | |
| "loss": 24.7769, | |
| "step": 214 | |
| }, | |
| { | |
| "epoch": 0.6912, | |
| "grad_norm": 2.367741823196411, | |
| "learning_rate": 0.001548663101604278, | |
| "loss": 24.7073, | |
| "step": 216 | |
| }, | |
| { | |
| "epoch": 0.6976, | |
| "grad_norm": 1.5215927362442017, | |
| "learning_rate": 0.001544385026737968, | |
| "loss": 24.8652, | |
| "step": 218 | |
| }, | |
| { | |
| "epoch": 0.704, | |
| "grad_norm": 1.8906418085098267, | |
| "learning_rate": 0.0015401069518716577, | |
| "loss": 24.7487, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.7104, | |
| "grad_norm": 1.5913176536560059, | |
| "learning_rate": 0.0015358288770053476, | |
| "loss": 25.017, | |
| "step": 222 | |
| }, | |
| { | |
| "epoch": 0.7168, | |
| "grad_norm": 1.6209039688110352, | |
| "learning_rate": 0.0015315508021390375, | |
| "loss": 24.339, | |
| "step": 224 | |
| }, | |
| { | |
| "epoch": 0.7232, | |
| "grad_norm": 1.5647910833358765, | |
| "learning_rate": 0.0015272727272727274, | |
| "loss": 24.1431, | |
| "step": 226 | |
| }, | |
| { | |
| "epoch": 0.7296, | |
| "grad_norm": 1.111008882522583, | |
| "learning_rate": 0.001522994652406417, | |
| "loss": 24.3126, | |
| "step": 228 | |
| }, | |
| { | |
| "epoch": 0.736, | |
| "grad_norm": 1.259020209312439, | |
| "learning_rate": 0.0015187165775401071, | |
| "loss": 24.1569, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.7424, | |
| "grad_norm": 1.2711575031280518, | |
| "learning_rate": 0.0015144385026737968, | |
| "loss": 24.4551, | |
| "step": 232 | |
| }, | |
| { | |
| "epoch": 0.7488, | |
| "grad_norm": 1.1747275590896606, | |
| "learning_rate": 0.0015101604278074867, | |
| "loss": 24.2954, | |
| "step": 234 | |
| }, | |
| { | |
| "epoch": 0.7552, | |
| "grad_norm": 1.3041143417358398, | |
| "learning_rate": 0.0015058823529411766, | |
| "loss": 24.3246, | |
| "step": 236 | |
| }, | |
| { | |
| "epoch": 0.7616, | |
| "grad_norm": 1.2481331825256348, | |
| "learning_rate": 0.0015016042780748664, | |
| "loss": 24.3238, | |
| "step": 238 | |
| }, | |
| { | |
| "epoch": 0.768, | |
| "grad_norm": 1.1186060905456543, | |
| "learning_rate": 0.001497326203208556, | |
| "loss": 24.5642, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.7744, | |
| "grad_norm": 1.3123741149902344, | |
| "learning_rate": 0.0014930481283422462, | |
| "loss": 24.5782, | |
| "step": 242 | |
| }, | |
| { | |
| "epoch": 0.7808, | |
| "grad_norm": 1.0362588167190552, | |
| "learning_rate": 0.0014887700534759359, | |
| "loss": 24.519, | |
| "step": 244 | |
| }, | |
| { | |
| "epoch": 0.7872, | |
| "grad_norm": 0.9986498355865479, | |
| "learning_rate": 0.0014844919786096255, | |
| "loss": 24.6729, | |
| "step": 246 | |
| }, | |
| { | |
| "epoch": 0.7936, | |
| "grad_norm": 1.0757042169570923, | |
| "learning_rate": 0.0014802139037433156, | |
| "loss": 24.2834, | |
| "step": 248 | |
| }, | |
| { | |
| "epoch": 0.8, | |
| "grad_norm": 1.1017972230911255, | |
| "learning_rate": 0.0014759358288770053, | |
| "loss": 23.9501, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.8064, | |
| "grad_norm": 1.1957108974456787, | |
| "learning_rate": 0.0014716577540106952, | |
| "loss": 24.0922, | |
| "step": 252 | |
| }, | |
| { | |
| "epoch": 0.8128, | |
| "grad_norm": 1.1655299663543701, | |
| "learning_rate": 0.001467379679144385, | |
| "loss": 24.3735, | |
| "step": 254 | |
| }, | |
| { | |
| "epoch": 0.8192, | |
| "grad_norm": 0.9843050837516785, | |
| "learning_rate": 0.001463101604278075, | |
| "loss": 24.1656, | |
| "step": 256 | |
| }, | |
| { | |
| "epoch": 0.8256, | |
| "grad_norm": 1.0374311208724976, | |
| "learning_rate": 0.0014588235294117646, | |
| "loss": 24.2207, | |
| "step": 258 | |
| }, | |
| { | |
| "epoch": 0.832, | |
| "grad_norm": 0.9728087186813354, | |
| "learning_rate": 0.0014545454545454547, | |
| "loss": 24.1451, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.8384, | |
| "grad_norm": 0.9153401255607605, | |
| "learning_rate": 0.0014502673796791443, | |
| "loss": 24.0903, | |
| "step": 262 | |
| }, | |
| { | |
| "epoch": 0.8448, | |
| "grad_norm": 1.1150263547897339, | |
| "learning_rate": 0.0014459893048128342, | |
| "loss": 24.375, | |
| "step": 264 | |
| }, | |
| { | |
| "epoch": 0.8512, | |
| "grad_norm": 1.2261894941329956, | |
| "learning_rate": 0.001441711229946524, | |
| "loss": 24.5365, | |
| "step": 266 | |
| }, | |
| { | |
| "epoch": 0.8576, | |
| "grad_norm": 0.8708057403564453, | |
| "learning_rate": 0.001437433155080214, | |
| "loss": 24.0552, | |
| "step": 268 | |
| }, | |
| { | |
| "epoch": 0.864, | |
| "grad_norm": 0.8445040583610535, | |
| "learning_rate": 0.0014331550802139036, | |
| "loss": 24.2762, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.8704, | |
| "grad_norm": 1.1743762493133545, | |
| "learning_rate": 0.0014288770053475937, | |
| "loss": 24.4677, | |
| "step": 272 | |
| }, | |
| { | |
| "epoch": 0.8768, | |
| "grad_norm": 1.5417506694793701, | |
| "learning_rate": 0.0014245989304812834, | |
| "loss": 24.3304, | |
| "step": 274 | |
| }, | |
| { | |
| "epoch": 0.8832, | |
| "grad_norm": 1.1955397129058838, | |
| "learning_rate": 0.0014203208556149733, | |
| "loss": 24.2557, | |
| "step": 276 | |
| }, | |
| { | |
| "epoch": 0.8896, | |
| "grad_norm": 0.8730282187461853, | |
| "learning_rate": 0.0014160427807486632, | |
| "loss": 24.2498, | |
| "step": 278 | |
| }, | |
| { | |
| "epoch": 0.896, | |
| "grad_norm": 1.2659244537353516, | |
| "learning_rate": 0.001411764705882353, | |
| "loss": 24.1083, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.9024, | |
| "grad_norm": 1.3372520208358765, | |
| "learning_rate": 0.0014074866310160427, | |
| "loss": 24.3094, | |
| "step": 282 | |
| }, | |
| { | |
| "epoch": 0.9088, | |
| "grad_norm": 0.9774726033210754, | |
| "learning_rate": 0.0014032085561497328, | |
| "loss": 24.2246, | |
| "step": 284 | |
| }, | |
| { | |
| "epoch": 0.9152, | |
| "grad_norm": 1.2357187271118164, | |
| "learning_rate": 0.0013989304812834225, | |
| "loss": 23.8482, | |
| "step": 286 | |
| }, | |
| { | |
| "epoch": 0.9216, | |
| "grad_norm": 0.8730471730232239, | |
| "learning_rate": 0.0013946524064171121, | |
| "loss": 24.1488, | |
| "step": 288 | |
| }, | |
| { | |
| "epoch": 0.928, | |
| "grad_norm": 1.1759275197982788, | |
| "learning_rate": 0.0013903743315508022, | |
| "loss": 24.0689, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.9344, | |
| "grad_norm": 1.0625526905059814, | |
| "learning_rate": 0.0013860962566844919, | |
| "loss": 23.9011, | |
| "step": 292 | |
| }, | |
| { | |
| "epoch": 0.9408, | |
| "grad_norm": 0.9794639945030212, | |
| "learning_rate": 0.0013818181818181818, | |
| "loss": 24.1742, | |
| "step": 294 | |
| }, | |
| { | |
| "epoch": 0.9472, | |
| "grad_norm": 0.8726012110710144, | |
| "learning_rate": 0.0013775401069518716, | |
| "loss": 24.2234, | |
| "step": 296 | |
| }, | |
| { | |
| "epoch": 0.9536, | |
| "grad_norm": 0.9825778007507324, | |
| "learning_rate": 0.0013732620320855615, | |
| "loss": 23.993, | |
| "step": 298 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "grad_norm": 1.0842067003250122, | |
| "learning_rate": 0.0013689839572192512, | |
| "loss": 24.1026, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.9664, | |
| "grad_norm": 0.9663395881652832, | |
| "learning_rate": 0.0013647058823529413, | |
| "loss": 24.1273, | |
| "step": 302 | |
| }, | |
| { | |
| "epoch": 0.9728, | |
| "grad_norm": 1.0321509838104248, | |
| "learning_rate": 0.001360427807486631, | |
| "loss": 24.0919, | |
| "step": 304 | |
| }, | |
| { | |
| "epoch": 0.9792, | |
| "grad_norm": 1.0301271677017212, | |
| "learning_rate": 0.001356149732620321, | |
| "loss": 23.9162, | |
| "step": 306 | |
| }, | |
| { | |
| "epoch": 0.9856, | |
| "grad_norm": 1.1476960182189941, | |
| "learning_rate": 0.0013518716577540107, | |
| "loss": 23.9016, | |
| "step": 308 | |
| }, | |
| { | |
| "epoch": 0.992, | |
| "grad_norm": 1.044398307800293, | |
| "learning_rate": 0.0013475935828877006, | |
| "loss": 24.11, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.9984, | |
| "grad_norm": 1.3517370223999023, | |
| "learning_rate": 0.0013433155080213905, | |
| "loss": 23.9528, | |
| "step": 312 | |
| }, | |
| { | |
| "epoch": 1.0032, | |
| "grad_norm": 1.1732239723205566, | |
| "learning_rate": 0.0013390374331550803, | |
| "loss": 18.1742, | |
| "step": 314 | |
| }, | |
| { | |
| "epoch": 1.0096, | |
| "grad_norm": 1.0016353130340576, | |
| "learning_rate": 0.00133475935828877, | |
| "loss": 23.7102, | |
| "step": 316 | |
| }, | |
| { | |
| "epoch": 1.016, | |
| "grad_norm": 1.3539528846740723, | |
| "learning_rate": 0.00133048128342246, | |
| "loss": 23.8078, | |
| "step": 318 | |
| }, | |
| { | |
| "epoch": 1.0224, | |
| "grad_norm": 1.2378896474838257, | |
| "learning_rate": 0.0013262032085561498, | |
| "loss": 24.2912, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 1.0288, | |
| "grad_norm": 1.118072748184204, | |
| "learning_rate": 0.0013219251336898396, | |
| "loss": 23.9082, | |
| "step": 322 | |
| }, | |
| { | |
| "epoch": 1.0352, | |
| "grad_norm": 1.4039278030395508, | |
| "learning_rate": 0.0013176470588235295, | |
| "loss": 24.1193, | |
| "step": 324 | |
| }, | |
| { | |
| "epoch": 1.0416, | |
| "grad_norm": 0.9324209094047546, | |
| "learning_rate": 0.0013133689839572194, | |
| "loss": 23.9758, | |
| "step": 326 | |
| }, | |
| { | |
| "epoch": 1.048, | |
| "grad_norm": 1.21006178855896, | |
| "learning_rate": 0.001309090909090909, | |
| "loss": 24.0922, | |
| "step": 328 | |
| }, | |
| { | |
| "epoch": 1.0544, | |
| "grad_norm": 1.083890438079834, | |
| "learning_rate": 0.0013048128342245992, | |
| "loss": 23.9699, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 1.0608, | |
| "grad_norm": 0.8704414367675781, | |
| "learning_rate": 0.0013005347593582888, | |
| "loss": 23.6364, | |
| "step": 332 | |
| }, | |
| { | |
| "epoch": 1.0672, | |
| "grad_norm": 0.9826658964157104, | |
| "learning_rate": 0.0012962566844919785, | |
| "loss": 24.1282, | |
| "step": 334 | |
| }, | |
| { | |
| "epoch": 1.0735999999999999, | |
| "grad_norm": 1.005998969078064, | |
| "learning_rate": 0.0012919786096256686, | |
| "loss": 24.0106, | |
| "step": 336 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "grad_norm": 0.9576805233955383, | |
| "learning_rate": 0.0012877005347593582, | |
| "loss": 23.7264, | |
| "step": 338 | |
| }, | |
| { | |
| "epoch": 1.0864, | |
| "grad_norm": 0.9244794845581055, | |
| "learning_rate": 0.0012834224598930481, | |
| "loss": 23.7184, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 1.0928, | |
| "grad_norm": 0.8726948499679565, | |
| "learning_rate": 0.001279144385026738, | |
| "loss": 23.8847, | |
| "step": 342 | |
| }, | |
| { | |
| "epoch": 1.0992, | |
| "grad_norm": 1.184464931488037, | |
| "learning_rate": 0.0012748663101604279, | |
| "loss": 23.9564, | |
| "step": 344 | |
| }, | |
| { | |
| "epoch": 1.1056, | |
| "grad_norm": 1.0483590364456177, | |
| "learning_rate": 0.0012705882352941175, | |
| "loss": 24.0259, | |
| "step": 346 | |
| }, | |
| { | |
| "epoch": 1.112, | |
| "grad_norm": 0.85725998878479, | |
| "learning_rate": 0.0012663101604278076, | |
| "loss": 23.9331, | |
| "step": 348 | |
| }, | |
| { | |
| "epoch": 1.1184, | |
| "grad_norm": 0.9352357387542725, | |
| "learning_rate": 0.0012620320855614973, | |
| "loss": 23.9268, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 1.1248, | |
| "grad_norm": 0.9234675765037537, | |
| "learning_rate": 0.0012577540106951872, | |
| "loss": 24.0202, | |
| "step": 352 | |
| }, | |
| { | |
| "epoch": 1.1312, | |
| "grad_norm": 0.8931582570075989, | |
| "learning_rate": 0.001253475935828877, | |
| "loss": 23.7643, | |
| "step": 354 | |
| }, | |
| { | |
| "epoch": 1.1376, | |
| "grad_norm": 0.9177488684654236, | |
| "learning_rate": 0.001249197860962567, | |
| "loss": 23.7432, | |
| "step": 356 | |
| }, | |
| { | |
| "epoch": 1.144, | |
| "grad_norm": 0.8955037593841553, | |
| "learning_rate": 0.0012449197860962566, | |
| "loss": 23.7637, | |
| "step": 358 | |
| }, | |
| { | |
| "epoch": 1.1504, | |
| "grad_norm": 1.0113520622253418, | |
| "learning_rate": 0.0012406417112299467, | |
| "loss": 23.931, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 1.1568, | |
| "grad_norm": 0.8994941711425781, | |
| "learning_rate": 0.0012363636363636364, | |
| "loss": 23.7517, | |
| "step": 362 | |
| }, | |
| { | |
| "epoch": 1.1632, | |
| "grad_norm": 0.9318895936012268, | |
| "learning_rate": 0.0012320855614973262, | |
| "loss": 23.8983, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 1.1696, | |
| "grad_norm": 1.0647019147872925, | |
| "learning_rate": 0.0012278074866310161, | |
| "loss": 23.8213, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 1.176, | |
| "grad_norm": 1.132948637008667, | |
| "learning_rate": 0.001223529411764706, | |
| "loss": 23.6347, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 1.1824, | |
| "grad_norm": 0.9466244578361511, | |
| "learning_rate": 0.0012192513368983957, | |
| "loss": 23.9028, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 1.1888, | |
| "grad_norm": 0.9593258500099182, | |
| "learning_rate": 0.0012149732620320858, | |
| "loss": 23.98, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 1.1952, | |
| "grad_norm": 1.0898252725601196, | |
| "learning_rate": 0.0012106951871657754, | |
| "loss": 23.5301, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 1.2016, | |
| "grad_norm": 1.0035079717636108, | |
| "learning_rate": 0.001206417112299465, | |
| "loss": 23.6481, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 1.208, | |
| "grad_norm": 0.890784502029419, | |
| "learning_rate": 0.0012021390374331552, | |
| "loss": 23.638, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 1.2144, | |
| "grad_norm": 1.0010117292404175, | |
| "learning_rate": 0.0011978609625668448, | |
| "loss": 23.7085, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 1.2208, | |
| "grad_norm": 0.92851322889328, | |
| "learning_rate": 0.0011935828877005347, | |
| "loss": 23.9561, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 1.2272, | |
| "grad_norm": 0.966708242893219, | |
| "learning_rate": 0.0011893048128342246, | |
| "loss": 23.5308, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 1.2336, | |
| "grad_norm": 0.9790504574775696, | |
| "learning_rate": 0.0011850267379679145, | |
| "loss": 23.8811, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 1.24, | |
| "grad_norm": 0.9370564818382263, | |
| "learning_rate": 0.0011807486631016041, | |
| "loss": 23.6945, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 1.2464, | |
| "grad_norm": 1.0068321228027344, | |
| "learning_rate": 0.0011764705882352942, | |
| "loss": 23.7471, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 1.2528000000000001, | |
| "grad_norm": 0.880582332611084, | |
| "learning_rate": 0.001172192513368984, | |
| "loss": 23.4096, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 1.2591999999999999, | |
| "grad_norm": 0.8774146437644958, | |
| "learning_rate": 0.0011679144385026738, | |
| "loss": 23.7207, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 1.2656, | |
| "grad_norm": 1.070694923400879, | |
| "learning_rate": 0.0011636363636363637, | |
| "loss": 23.6461, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 1.272, | |
| "grad_norm": 1.0852477550506592, | |
| "learning_rate": 0.0011593582887700535, | |
| "loss": 23.8393, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 1.2784, | |
| "grad_norm": 0.9843823313713074, | |
| "learning_rate": 0.0011550802139037432, | |
| "loss": 23.798, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 1.2784, | |
| "eval_loss": 1.4875859022140503, | |
| "eval_runtime": 1157.3489, | |
| "eval_samples_per_second": 8.64, | |
| "eval_steps_per_second": 1.08, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 1.2848, | |
| "grad_norm": 0.9516158103942871, | |
| "learning_rate": 0.0011508021390374333, | |
| "loss": 23.8632, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 1.2912, | |
| "grad_norm": 0.9639537930488586, | |
| "learning_rate": 0.001146524064171123, | |
| "loss": 23.7238, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 1.2976, | |
| "grad_norm": 1.018441081047058, | |
| "learning_rate": 0.0011422459893048128, | |
| "loss": 23.5542, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 1.304, | |
| "grad_norm": 0.9791119694709778, | |
| "learning_rate": 0.0011379679144385027, | |
| "loss": 23.5262, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 1.3104, | |
| "grad_norm": 0.9863831996917725, | |
| "learning_rate": 0.0011336898395721926, | |
| "loss": 23.763, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 1.3168, | |
| "grad_norm": 0.9786751866340637, | |
| "learning_rate": 0.0011294117647058823, | |
| "loss": 23.6861, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 1.3232, | |
| "grad_norm": 0.9191985130310059, | |
| "learning_rate": 0.0011251336898395724, | |
| "loss": 23.5714, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 1.3296000000000001, | |
| "grad_norm": 1.1038320064544678, | |
| "learning_rate": 0.001120855614973262, | |
| "loss": 23.6797, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 1.336, | |
| "grad_norm": 0.9039956331253052, | |
| "learning_rate": 0.0011165775401069521, | |
| "loss": 23.5835, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 1.3424, | |
| "grad_norm": 0.9752773642539978, | |
| "learning_rate": 0.0011122994652406418, | |
| "loss": 23.8994, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 1.3488, | |
| "grad_norm": 1.058692455291748, | |
| "learning_rate": 0.0011080213903743314, | |
| "loss": 23.7038, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 1.3552, | |
| "grad_norm": 0.89438396692276, | |
| "learning_rate": 0.0011037433155080215, | |
| "loss": 23.6139, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 1.3616, | |
| "grad_norm": 0.9645833373069763, | |
| "learning_rate": 0.0010994652406417112, | |
| "loss": 23.603, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 1.3679999999999999, | |
| "grad_norm": 0.9102054834365845, | |
| "learning_rate": 0.001095187165775401, | |
| "loss": 23.6805, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 1.3744, | |
| "grad_norm": 1.066123366355896, | |
| "learning_rate": 0.001090909090909091, | |
| "loss": 23.623, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 1.3808, | |
| "grad_norm": 0.9730815291404724, | |
| "learning_rate": 0.0010866310160427808, | |
| "loss": 23.9918, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 1.3872, | |
| "grad_norm": 0.953152596950531, | |
| "learning_rate": 0.0010823529411764705, | |
| "loss": 23.5223, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 1.3936, | |
| "grad_norm": 0.9269129633903503, | |
| "learning_rate": 0.0010780748663101606, | |
| "loss": 23.5794, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 1.4, | |
| "grad_norm": 1.0842361450195312, | |
| "learning_rate": 0.0010737967914438503, | |
| "loss": 23.0374, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 1.4064, | |
| "grad_norm": 0.9895943999290466, | |
| "learning_rate": 0.0010695187165775401, | |
| "loss": 23.6018, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 1.4128, | |
| "grad_norm": 0.9779914617538452, | |
| "learning_rate": 0.00106524064171123, | |
| "loss": 23.4881, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 1.4192, | |
| "grad_norm": 0.9720066785812378, | |
| "learning_rate": 0.0010609625668449199, | |
| "loss": 23.7365, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 1.4256, | |
| "grad_norm": 0.9314095973968506, | |
| "learning_rate": 0.0010566844919786096, | |
| "loss": 23.6696, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 1.432, | |
| "grad_norm": 0.9558563232421875, | |
| "learning_rate": 0.0010524064171122996, | |
| "loss": 23.5277, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 1.4384000000000001, | |
| "grad_norm": 0.9274935722351074, | |
| "learning_rate": 0.0010481283422459893, | |
| "loss": 23.8089, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 1.4447999999999999, | |
| "grad_norm": 0.8989819884300232, | |
| "learning_rate": 0.0010438502673796792, | |
| "loss": 23.4861, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 1.4512, | |
| "grad_norm": 0.8949527144432068, | |
| "learning_rate": 0.001039572192513369, | |
| "loss": 23.8258, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 1.4576, | |
| "grad_norm": 1.0032631158828735, | |
| "learning_rate": 0.001035294117647059, | |
| "loss": 23.6332, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 1.464, | |
| "grad_norm": 0.8726269006729126, | |
| "learning_rate": 0.0010310160427807486, | |
| "loss": 23.852, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 1.4704, | |
| "grad_norm": 1.013187289237976, | |
| "learning_rate": 0.0010267379679144387, | |
| "loss": 23.6731, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 1.4768, | |
| "grad_norm": 0.9792904853820801, | |
| "learning_rate": 0.0010224598930481284, | |
| "loss": 23.4724, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 1.4832, | |
| "grad_norm": 0.9881017804145813, | |
| "learning_rate": 0.001018181818181818, | |
| "loss": 23.6523, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 1.4896, | |
| "grad_norm": 1.0167256593704224, | |
| "learning_rate": 0.0010139037433155081, | |
| "loss": 23.7077, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 1.496, | |
| "grad_norm": 1.028531551361084, | |
| "learning_rate": 0.0010096256684491978, | |
| "loss": 24.1109, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 1.5024, | |
| "grad_norm": 0.9525850415229797, | |
| "learning_rate": 0.0010053475935828877, | |
| "loss": 23.9823, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 1.5088, | |
| "grad_norm": 1.1063846349716187, | |
| "learning_rate": 0.0010010695187165776, | |
| "loss": 23.6939, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 1.5152, | |
| "grad_norm": 1.023152232170105, | |
| "learning_rate": 0.0009967914438502674, | |
| "loss": 23.5762, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 1.5215999999999998, | |
| "grad_norm": 0.9533156752586365, | |
| "learning_rate": 0.0009925133689839573, | |
| "loss": 23.8931, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 1.528, | |
| "grad_norm": 1.01291823387146, | |
| "learning_rate": 0.0009882352941176472, | |
| "loss": 23.7593, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 1.5344, | |
| "grad_norm": 0.967445969581604, | |
| "learning_rate": 0.000983957219251337, | |
| "loss": 23.5105, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 1.5408, | |
| "grad_norm": 0.8999999165534973, | |
| "learning_rate": 0.0009796791443850267, | |
| "loss": 23.9054, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 1.5472000000000001, | |
| "grad_norm": 1.0055955648422241, | |
| "learning_rate": 0.0009754010695187165, | |
| "loss": 23.6609, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 1.5535999999999999, | |
| "grad_norm": 0.9917962551116943, | |
| "learning_rate": 0.0009711229946524064, | |
| "loss": 23.664, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 1.56, | |
| "grad_norm": 1.0509769916534424, | |
| "learning_rate": 0.0009668449197860963, | |
| "loss": 23.5889, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 1.5664, | |
| "grad_norm": 0.9780173301696777, | |
| "learning_rate": 0.000962566844919786, | |
| "loss": 23.3563, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 1.5728, | |
| "grad_norm": 1.0623761415481567, | |
| "learning_rate": 0.0009582887700534759, | |
| "loss": 23.7324, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 1.5792000000000002, | |
| "grad_norm": 0.9415885806083679, | |
| "learning_rate": 0.0009540106951871658, | |
| "loss": 23.6075, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 1.5856, | |
| "grad_norm": 1.0558156967163086, | |
| "learning_rate": 0.0009497326203208556, | |
| "loss": 23.9617, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 1.592, | |
| "grad_norm": 1.2639505863189697, | |
| "learning_rate": 0.0009454545454545454, | |
| "loss": 23.7581, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 1.5984, | |
| "grad_norm": 0.9966816902160645, | |
| "learning_rate": 0.0009411764705882353, | |
| "loss": 23.4159, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 1.6048, | |
| "grad_norm": 1.1803079843521118, | |
| "learning_rate": 0.0009368983957219251, | |
| "loss": 23.6882, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 1.6112, | |
| "grad_norm": 1.068477749824524, | |
| "learning_rate": 0.000932620320855615, | |
| "loss": 23.3086, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 1.6176, | |
| "grad_norm": 1.0496388673782349, | |
| "learning_rate": 0.0009283422459893048, | |
| "loss": 23.5714, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 1.624, | |
| "grad_norm": 0.9868847727775574, | |
| "learning_rate": 0.0009240641711229947, | |
| "loss": 23.3231, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 1.6303999999999998, | |
| "grad_norm": 1.0146859884262085, | |
| "learning_rate": 0.0009197860962566845, | |
| "loss": 23.474, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 1.6368, | |
| "grad_norm": 1.3640587329864502, | |
| "learning_rate": 0.0009155080213903744, | |
| "loss": 23.728, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 1.6432, | |
| "grad_norm": 1.0222820043563843, | |
| "learning_rate": 0.0009112299465240643, | |
| "loss": 23.8034, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 1.6496, | |
| "grad_norm": 0.9659361243247986, | |
| "learning_rate": 0.000906951871657754, | |
| "loss": 23.454, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 1.6560000000000001, | |
| "grad_norm": 1.0694700479507446, | |
| "learning_rate": 0.0009026737967914439, | |
| "loss": 23.7977, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 1.6623999999999999, | |
| "grad_norm": 0.9259259700775146, | |
| "learning_rate": 0.0008983957219251338, | |
| "loss": 23.5328, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 1.6688, | |
| "grad_norm": 1.1431095600128174, | |
| "learning_rate": 0.0008941176470588236, | |
| "loss": 23.5061, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 1.6752, | |
| "grad_norm": 1.0091369152069092, | |
| "learning_rate": 0.0008898395721925134, | |
| "loss": 23.414, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 1.6816, | |
| "grad_norm": 1.0047186613082886, | |
| "learning_rate": 0.0008855614973262033, | |
| "loss": 23.7231, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 1.688, | |
| "grad_norm": 1.020326018333435, | |
| "learning_rate": 0.000881283422459893, | |
| "loss": 23.5656, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 1.6944, | |
| "grad_norm": 1.2622140645980835, | |
| "learning_rate": 0.0008770053475935829, | |
| "loss": 23.1935, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 1.7008, | |
| "grad_norm": 0.9633255004882812, | |
| "learning_rate": 0.0008727272727272727, | |
| "loss": 23.7261, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 1.7072, | |
| "grad_norm": 0.9747726321220398, | |
| "learning_rate": 0.0008684491978609625, | |
| "loss": 23.3123, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 1.7136, | |
| "grad_norm": 0.8444503545761108, | |
| "learning_rate": 0.0008641711229946524, | |
| "loss": 23.703, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 1.72, | |
| "grad_norm": 0.9087849855422974, | |
| "learning_rate": 0.0008598930481283423, | |
| "loss": 23.3917, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 1.7264, | |
| "grad_norm": 0.9356536269187927, | |
| "learning_rate": 0.000855614973262032, | |
| "loss": 23.6751, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 1.7328000000000001, | |
| "grad_norm": 0.9630663990974426, | |
| "learning_rate": 0.0008513368983957219, | |
| "loss": 23.4119, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 1.7391999999999999, | |
| "grad_norm": 1.0205367803573608, | |
| "learning_rate": 0.0008470588235294118, | |
| "loss": 23.2226, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 1.7456, | |
| "grad_norm": 1.0030864477157593, | |
| "learning_rate": 0.0008427807486631016, | |
| "loss": 23.6982, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 1.752, | |
| "grad_norm": 0.9470692276954651, | |
| "learning_rate": 0.0008385026737967914, | |
| "loss": 23.6646, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 1.7584, | |
| "grad_norm": 0.9366554617881775, | |
| "learning_rate": 0.0008342245989304813, | |
| "loss": 23.7817, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 1.7648000000000001, | |
| "grad_norm": 0.9047606587409973, | |
| "learning_rate": 0.0008299465240641711, | |
| "loss": 23.5856, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 1.7711999999999999, | |
| "grad_norm": 0.9083183407783508, | |
| "learning_rate": 0.000825668449197861, | |
| "loss": 23.694, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 1.7776, | |
| "grad_norm": 0.9529908895492554, | |
| "learning_rate": 0.0008213903743315509, | |
| "loss": 23.9012, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 1.784, | |
| "grad_norm": 0.9241888523101807, | |
| "learning_rate": 0.0008171122994652406, | |
| "loss": 23.6046, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 1.7904, | |
| "grad_norm": 0.869388997554779, | |
| "learning_rate": 0.0008128342245989305, | |
| "loss": 23.4944, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 1.7968, | |
| "grad_norm": 0.8648447394371033, | |
| "learning_rate": 0.0008085561497326204, | |
| "loss": 23.2361, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 1.8032, | |
| "grad_norm": 1.0052869319915771, | |
| "learning_rate": 0.0008042780748663103, | |
| "loss": 23.2241, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 1.8096, | |
| "grad_norm": 0.9710979461669922, | |
| "learning_rate": 0.0008, | |
| "loss": 23.3083, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 1.8159999999999998, | |
| "grad_norm": 0.9305456280708313, | |
| "learning_rate": 0.0007957219251336899, | |
| "loss": 23.5668, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 1.8224, | |
| "grad_norm": 0.9786150455474854, | |
| "learning_rate": 0.0007914438502673798, | |
| "loss": 23.576, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 1.8288, | |
| "grad_norm": 0.9177144169807434, | |
| "learning_rate": 0.0007871657754010695, | |
| "loss": 23.8692, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 1.8352, | |
| "grad_norm": 0.8715292811393738, | |
| "learning_rate": 0.0007828877005347593, | |
| "loss": 23.5924, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 1.8416000000000001, | |
| "grad_norm": 0.9923094511032104, | |
| "learning_rate": 0.0007786096256684492, | |
| "loss": 23.4065, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 1.8479999999999999, | |
| "grad_norm": 0.931535542011261, | |
| "learning_rate": 0.000774331550802139, | |
| "loss": 23.6056, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 1.8544, | |
| "grad_norm": 0.8508500456809998, | |
| "learning_rate": 0.0007700534759358289, | |
| "loss": 23.4128, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 1.8608, | |
| "grad_norm": 0.916963517665863, | |
| "learning_rate": 0.0007657754010695187, | |
| "loss": 23.6378, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 1.8672, | |
| "grad_norm": 0.8805645108222961, | |
| "learning_rate": 0.0007614973262032085, | |
| "loss": 23.2751, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 1.8736000000000002, | |
| "grad_norm": 0.9679768085479736, | |
| "learning_rate": 0.0007572192513368984, | |
| "loss": 23.496, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 1.88, | |
| "grad_norm": 0.9578665494918823, | |
| "learning_rate": 0.0007529411764705883, | |
| "loss": 23.5754, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 1.8864, | |
| "grad_norm": 0.9930956959724426, | |
| "learning_rate": 0.000748663101604278, | |
| "loss": 23.3563, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 1.8928, | |
| "grad_norm": 0.8266828656196594, | |
| "learning_rate": 0.0007443850267379679, | |
| "loss": 23.7507, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 1.8992, | |
| "grad_norm": 0.9037677049636841, | |
| "learning_rate": 0.0007401069518716578, | |
| "loss": 23.3083, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 1.9056, | |
| "grad_norm": 0.9325649738311768, | |
| "learning_rate": 0.0007358288770053476, | |
| "loss": 23.4873, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 1.912, | |
| "grad_norm": 0.8425143957138062, | |
| "learning_rate": 0.0007315508021390375, | |
| "loss": 23.2706, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 1.9184, | |
| "grad_norm": 0.9997607469558716, | |
| "learning_rate": 0.0007272727272727273, | |
| "loss": 23.4578, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 1.9247999999999998, | |
| "grad_norm": 0.9266635775566101, | |
| "learning_rate": 0.0007229946524064171, | |
| "loss": 23.3936, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 1.9312, | |
| "grad_norm": 0.9372814893722534, | |
| "learning_rate": 0.000718716577540107, | |
| "loss": 23.7373, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 1.9376, | |
| "grad_norm": 1.0302927494049072, | |
| "learning_rate": 0.0007144385026737969, | |
| "loss": 23.3622, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 1.944, | |
| "grad_norm": 0.8410359025001526, | |
| "learning_rate": 0.0007101604278074866, | |
| "loss": 23.6175, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 1.9504000000000001, | |
| "grad_norm": 1.002364993095398, | |
| "learning_rate": 0.0007058823529411765, | |
| "loss": 23.7065, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 1.9567999999999999, | |
| "grad_norm": 0.8989892601966858, | |
| "learning_rate": 0.0007016042780748664, | |
| "loss": 23.1151, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 1.9632, | |
| "grad_norm": 0.9592454433441162, | |
| "learning_rate": 0.0006973262032085561, | |
| "loss": 23.6266, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 1.9696, | |
| "grad_norm": 0.9210177063941956, | |
| "learning_rate": 0.0006930481283422459, | |
| "loss": 23.2886, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 1.976, | |
| "grad_norm": 1.1872823238372803, | |
| "learning_rate": 0.0006887700534759358, | |
| "loss": 23.495, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 1.9824000000000002, | |
| "grad_norm": 0.9874787330627441, | |
| "learning_rate": 0.0006844919786096256, | |
| "loss": 23.6608, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 1.9888, | |
| "grad_norm": 1.1812684535980225, | |
| "learning_rate": 0.0006802139037433155, | |
| "loss": 23.5925, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 1.9952, | |
| "grad_norm": 1.0033948421478271, | |
| "learning_rate": 0.0006759358288770053, | |
| "loss": 23.4883, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "grad_norm": 0.7030152082443237, | |
| "learning_rate": 0.0006716577540106952, | |
| "loss": 17.5334, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 2.0064, | |
| "grad_norm": 1.017948865890503, | |
| "learning_rate": 0.000667379679144385, | |
| "loss": 23.2155, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 2.0128, | |
| "grad_norm": 0.9184821248054504, | |
| "learning_rate": 0.0006631016042780749, | |
| "loss": 23.2131, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 2.0192, | |
| "grad_norm": 1.0140761137008667, | |
| "learning_rate": 0.0006588235294117648, | |
| "loss": 23.0792, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 2.0256, | |
| "grad_norm": 0.9820951223373413, | |
| "learning_rate": 0.0006545454545454545, | |
| "loss": 23.2678, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 2.032, | |
| "grad_norm": 0.9167227745056152, | |
| "learning_rate": 0.0006502673796791444, | |
| "loss": 23.7916, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 2.0384, | |
| "grad_norm": 0.935867428779602, | |
| "learning_rate": 0.0006459893048128343, | |
| "loss": 23.5265, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 2.0448, | |
| "grad_norm": 0.871694803237915, | |
| "learning_rate": 0.0006417112299465241, | |
| "loss": 23.2606, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 2.0512, | |
| "grad_norm": 0.9383721947669983, | |
| "learning_rate": 0.0006374331550802139, | |
| "loss": 23.4792, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 2.0576, | |
| "grad_norm": 0.9656952023506165, | |
| "learning_rate": 0.0006331550802139038, | |
| "loss": 23.1145, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 2.064, | |
| "grad_norm": 0.8393946886062622, | |
| "learning_rate": 0.0006288770053475936, | |
| "loss": 23.2965, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 2.0704, | |
| "grad_norm": 0.8161150217056274, | |
| "learning_rate": 0.0006245989304812835, | |
| "loss": 23.4888, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 2.0768, | |
| "grad_norm": 0.8854630589485168, | |
| "learning_rate": 0.0006203208556149733, | |
| "loss": 23.4233, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 2.0832, | |
| "grad_norm": 0.9034832119941711, | |
| "learning_rate": 0.0006160427807486631, | |
| "loss": 23.1208, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 2.0896, | |
| "grad_norm": 0.9552251696586609, | |
| "learning_rate": 0.000611764705882353, | |
| "loss": 23.2775, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 2.096, | |
| "grad_norm": 0.9383313059806824, | |
| "learning_rate": 0.0006074866310160429, | |
| "loss": 23.096, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 2.1024, | |
| "grad_norm": 0.9303053021430969, | |
| "learning_rate": 0.0006032085561497325, | |
| "loss": 23.6542, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 2.1088, | |
| "grad_norm": 0.9048749804496765, | |
| "learning_rate": 0.0005989304812834224, | |
| "loss": 23.3699, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 2.1152, | |
| "grad_norm": 0.9803421497344971, | |
| "learning_rate": 0.0005946524064171123, | |
| "loss": 23.3341, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 2.1216, | |
| "grad_norm": 0.8741451501846313, | |
| "learning_rate": 0.0005903743315508021, | |
| "loss": 23.2353, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 2.128, | |
| "grad_norm": 0.9050042629241943, | |
| "learning_rate": 0.000586096256684492, | |
| "loss": 23.7765, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 2.1344, | |
| "grad_norm": 0.8833932280540466, | |
| "learning_rate": 0.0005818181818181818, | |
| "loss": 23.154, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 2.1408, | |
| "grad_norm": 0.872748851776123, | |
| "learning_rate": 0.0005775401069518716, | |
| "loss": 23.5524, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 2.1471999999999998, | |
| "grad_norm": 0.9564759731292725, | |
| "learning_rate": 0.0005732620320855615, | |
| "loss": 23.3869, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 2.1536, | |
| "grad_norm": 0.9444039463996887, | |
| "learning_rate": 0.0005689839572192514, | |
| "loss": 23.5905, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 2.16, | |
| "grad_norm": 1.1383410692214966, | |
| "learning_rate": 0.0005647058823529411, | |
| "loss": 23.1071, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 2.1664, | |
| "grad_norm": 0.8750917911529541, | |
| "learning_rate": 0.000560427807486631, | |
| "loss": 23.5244, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 2.1728, | |
| "grad_norm": 0.8726853132247925, | |
| "learning_rate": 0.0005561497326203209, | |
| "loss": 23.0605, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 2.1792, | |
| "grad_norm": 0.9074258208274841, | |
| "learning_rate": 0.0005518716577540108, | |
| "loss": 23.243, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 2.1856, | |
| "grad_norm": 0.9091795086860657, | |
| "learning_rate": 0.0005475935828877005, | |
| "loss": 23.3617, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 2.192, | |
| "grad_norm": 0.909899115562439, | |
| "learning_rate": 0.0005433155080213904, | |
| "loss": 23.1892, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 2.1984, | |
| "grad_norm": 0.9476255774497986, | |
| "learning_rate": 0.0005390374331550803, | |
| "loss": 23.3369, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 2.2048, | |
| "grad_norm": 0.9601420760154724, | |
| "learning_rate": 0.0005347593582887701, | |
| "loss": 23.5679, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 2.2112, | |
| "grad_norm": 0.9360019564628601, | |
| "learning_rate": 0.0005304812834224599, | |
| "loss": 23.007, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 2.2176, | |
| "grad_norm": 0.9346802830696106, | |
| "learning_rate": 0.0005262032085561498, | |
| "loss": 23.0159, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 2.224, | |
| "grad_norm": 0.9711213111877441, | |
| "learning_rate": 0.0005219251336898396, | |
| "loss": 23.3363, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 2.2304, | |
| "grad_norm": 0.9546957612037659, | |
| "learning_rate": 0.0005176470588235295, | |
| "loss": 23.1201, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 2.2368, | |
| "grad_norm": 0.916655957698822, | |
| "learning_rate": 0.0005133689839572194, | |
| "loss": 23.2946, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 2.2432, | |
| "grad_norm": 0.8734573125839233, | |
| "learning_rate": 0.000509090909090909, | |
| "loss": 23.2751, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 2.2496, | |
| "grad_norm": 0.9505683779716492, | |
| "learning_rate": 0.0005048128342245989, | |
| "loss": 23.3028, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 2.2560000000000002, | |
| "grad_norm": 0.934208333492279, | |
| "learning_rate": 0.0005005347593582888, | |
| "loss": 23.2606, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 2.2624, | |
| "grad_norm": 0.8738580942153931, | |
| "learning_rate": 0.0004962566844919787, | |
| "loss": 23.0636, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 2.2688, | |
| "grad_norm": 0.9497429132461548, | |
| "learning_rate": 0.0004919786096256685, | |
| "loss": 23.3791, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 2.2752, | |
| "grad_norm": 0.9983781576156616, | |
| "learning_rate": 0.00048770053475935825, | |
| "loss": 23.4518, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 2.2816, | |
| "grad_norm": 0.9503285884857178, | |
| "learning_rate": 0.00048342245989304813, | |
| "loss": 23.2358, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 2.288, | |
| "grad_norm": 0.9116323590278625, | |
| "learning_rate": 0.00047914438502673796, | |
| "loss": 23.0996, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 2.2944, | |
| "grad_norm": 0.8628079295158386, | |
| "learning_rate": 0.0004748663101604278, | |
| "loss": 23.5187, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 2.3008, | |
| "grad_norm": 0.9411908984184265, | |
| "learning_rate": 0.00047058823529411766, | |
| "loss": 23.5873, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 2.3072, | |
| "grad_norm": 0.9314679503440857, | |
| "learning_rate": 0.0004663101604278075, | |
| "loss": 23.3654, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 2.3136, | |
| "grad_norm": 0.9462237358093262, | |
| "learning_rate": 0.00046203208556149736, | |
| "loss": 23.0101, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 2.32, | |
| "grad_norm": 0.889033317565918, | |
| "learning_rate": 0.0004577540106951872, | |
| "loss": 23.4283, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 2.3264, | |
| "grad_norm": 0.9167575836181641, | |
| "learning_rate": 0.000453475935828877, | |
| "loss": 23.2583, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 2.3327999999999998, | |
| "grad_norm": 0.9349282383918762, | |
| "learning_rate": 0.0004491978609625669, | |
| "loss": 23.463, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 2.3392, | |
| "grad_norm": 1.114784836769104, | |
| "learning_rate": 0.0004449197860962567, | |
| "loss": 23.2864, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 2.3456, | |
| "grad_norm": 0.9500236511230469, | |
| "learning_rate": 0.0004406417112299465, | |
| "loss": 23.4031, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 2.352, | |
| "grad_norm": 1.0214568376541138, | |
| "learning_rate": 0.00043636363636363637, | |
| "loss": 23.2233, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 2.3584, | |
| "grad_norm": 1.118489146232605, | |
| "learning_rate": 0.0004320855614973262, | |
| "loss": 23.7254, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 2.3648, | |
| "grad_norm": 0.9648391008377075, | |
| "learning_rate": 0.000427807486631016, | |
| "loss": 23.3774, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 2.3712, | |
| "grad_norm": 0.9128082394599915, | |
| "learning_rate": 0.0004235294117647059, | |
| "loss": 23.5378, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 2.3776, | |
| "grad_norm": 0.9346900582313538, | |
| "learning_rate": 0.0004192513368983957, | |
| "loss": 23.6871, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 2.384, | |
| "grad_norm": 1.0307550430297852, | |
| "learning_rate": 0.00041497326203208555, | |
| "loss": 23.2258, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 2.3904, | |
| "grad_norm": 0.9579919576644897, | |
| "learning_rate": 0.00041069518716577543, | |
| "loss": 23.269, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 2.3968, | |
| "grad_norm": 0.9140896201133728, | |
| "learning_rate": 0.00040641711229946525, | |
| "loss": 23.3352, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 2.4032, | |
| "grad_norm": 0.9879317879676819, | |
| "learning_rate": 0.00040213903743315513, | |
| "loss": 23.2264, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 2.4096, | |
| "grad_norm": 1.1229231357574463, | |
| "learning_rate": 0.00039786096256684496, | |
| "loss": 23.5893, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 2.416, | |
| "grad_norm": 0.8806203007698059, | |
| "learning_rate": 0.00039358288770053473, | |
| "loss": 23.3295, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 2.4224, | |
| "grad_norm": 1.0155560970306396, | |
| "learning_rate": 0.0003893048128342246, | |
| "loss": 23.0444, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 2.4288, | |
| "grad_norm": 0.9045442342758179, | |
| "learning_rate": 0.00038502673796791443, | |
| "loss": 22.9648, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 2.4352, | |
| "grad_norm": 0.9359453320503235, | |
| "learning_rate": 0.00038074866310160426, | |
| "loss": 23.6534, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 2.4416, | |
| "grad_norm": 0.9333987236022949, | |
| "learning_rate": 0.00037647058823529414, | |
| "loss": 23.3959, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 2.448, | |
| "grad_norm": 0.8483988046646118, | |
| "learning_rate": 0.00037219251336898396, | |
| "loss": 23.8575, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 2.4544, | |
| "grad_norm": 0.8853796720504761, | |
| "learning_rate": 0.0003679144385026738, | |
| "loss": 23.0032, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 2.4608, | |
| "grad_norm": 0.9364005327224731, | |
| "learning_rate": 0.00036363636363636367, | |
| "loss": 23.1803, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 2.4672, | |
| "grad_norm": 0.820959210395813, | |
| "learning_rate": 0.0003593582887700535, | |
| "loss": 23.4326, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 2.4736000000000002, | |
| "grad_norm": 0.927909255027771, | |
| "learning_rate": 0.0003550802139037433, | |
| "loss": 22.8314, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 2.48, | |
| "grad_norm": 0.9819096922874451, | |
| "learning_rate": 0.0003508021390374332, | |
| "loss": 23.3604, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 2.4864, | |
| "grad_norm": 0.8942359089851379, | |
| "learning_rate": 0.00034652406417112297, | |
| "loss": 23.3988, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 2.4928, | |
| "grad_norm": 0.9608676433563232, | |
| "learning_rate": 0.0003422459893048128, | |
| "loss": 23.2326, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 2.4992, | |
| "grad_norm": 0.8843042254447937, | |
| "learning_rate": 0.0003379679144385027, | |
| "loss": 23.3506, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 2.5056000000000003, | |
| "grad_norm": 0.8857446312904358, | |
| "learning_rate": 0.0003336898395721925, | |
| "loss": 23.1861, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 2.512, | |
| "grad_norm": 0.8458705544471741, | |
| "learning_rate": 0.0003294117647058824, | |
| "loss": 23.0167, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 2.5183999999999997, | |
| "grad_norm": 0.8760762214660645, | |
| "learning_rate": 0.0003251336898395722, | |
| "loss": 23.5173, | |
| "step": 788 | |
| }, | |
| { | |
| "epoch": 2.5248, | |
| "grad_norm": 0.8493122458457947, | |
| "learning_rate": 0.00032085561497326203, | |
| "loss": 23.257, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 2.5312, | |
| "grad_norm": 0.8905952572822571, | |
| "learning_rate": 0.0003165775401069519, | |
| "loss": 23.1194, | |
| "step": 792 | |
| }, | |
| { | |
| "epoch": 2.5376, | |
| "grad_norm": 0.9125280976295471, | |
| "learning_rate": 0.00031229946524064173, | |
| "loss": 23.0237, | |
| "step": 794 | |
| }, | |
| { | |
| "epoch": 2.544, | |
| "grad_norm": 0.9156554341316223, | |
| "learning_rate": 0.00030802139037433156, | |
| "loss": 23.4626, | |
| "step": 796 | |
| }, | |
| { | |
| "epoch": 2.5504, | |
| "grad_norm": 0.8901218771934509, | |
| "learning_rate": 0.00030374331550802144, | |
| "loss": 22.9936, | |
| "step": 798 | |
| }, | |
| { | |
| "epoch": 2.5568, | |
| "grad_norm": 0.8714930415153503, | |
| "learning_rate": 0.0002994652406417112, | |
| "loss": 23.1742, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 2.5568, | |
| "eval_loss": 1.4636129140853882, | |
| "eval_runtime": 1115.7802, | |
| "eval_samples_per_second": 8.962, | |
| "eval_steps_per_second": 1.12, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 2.5632, | |
| "grad_norm": 0.8894703984260559, | |
| "learning_rate": 0.00029518716577540103, | |
| "loss": 23.6523, | |
| "step": 802 | |
| }, | |
| { | |
| "epoch": 2.5696, | |
| "grad_norm": 0.8964853286743164, | |
| "learning_rate": 0.0002909090909090909, | |
| "loss": 22.9383, | |
| "step": 804 | |
| }, | |
| { | |
| "epoch": 2.576, | |
| "grad_norm": 0.8526540398597717, | |
| "learning_rate": 0.00028663101604278074, | |
| "loss": 23.1684, | |
| "step": 806 | |
| }, | |
| { | |
| "epoch": 2.5824, | |
| "grad_norm": 1.0182241201400757, | |
| "learning_rate": 0.00028235294117647056, | |
| "loss": 23.3383, | |
| "step": 808 | |
| }, | |
| { | |
| "epoch": 2.5888, | |
| "grad_norm": 0.9509109854698181, | |
| "learning_rate": 0.00027807486631016044, | |
| "loss": 23.4199, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 2.5952, | |
| "grad_norm": 0.8805685639381409, | |
| "learning_rate": 0.00027379679144385027, | |
| "loss": 23.3654, | |
| "step": 812 | |
| }, | |
| { | |
| "epoch": 2.6016, | |
| "grad_norm": 0.9017741680145264, | |
| "learning_rate": 0.00026951871657754015, | |
| "loss": 23.2814, | |
| "step": 814 | |
| }, | |
| { | |
| "epoch": 2.608, | |
| "grad_norm": 0.844363808631897, | |
| "learning_rate": 0.00026524064171122997, | |
| "loss": 23.3663, | |
| "step": 816 | |
| }, | |
| { | |
| "epoch": 2.6144, | |
| "grad_norm": 0.868501603603363, | |
| "learning_rate": 0.0002609625668449198, | |
| "loss": 23.3927, | |
| "step": 818 | |
| }, | |
| { | |
| "epoch": 2.6208, | |
| "grad_norm": 0.965802788734436, | |
| "learning_rate": 0.0002566844919786097, | |
| "loss": 23.3886, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 2.6272, | |
| "grad_norm": 0.9137208461761475, | |
| "learning_rate": 0.00025240641711229945, | |
| "loss": 23.2161, | |
| "step": 822 | |
| }, | |
| { | |
| "epoch": 2.6336, | |
| "grad_norm": 0.8800885081291199, | |
| "learning_rate": 0.00024812834224598933, | |
| "loss": 23.4479, | |
| "step": 824 | |
| }, | |
| { | |
| "epoch": 2.64, | |
| "grad_norm": 0.8783839344978333, | |
| "learning_rate": 0.00024385026737967913, | |
| "loss": 23.1788, | |
| "step": 826 | |
| }, | |
| { | |
| "epoch": 2.6464, | |
| "grad_norm": 0.8877150416374207, | |
| "learning_rate": 0.00023957219251336898, | |
| "loss": 23.3931, | |
| "step": 828 | |
| }, | |
| { | |
| "epoch": 2.6528, | |
| "grad_norm": 0.8638187050819397, | |
| "learning_rate": 0.00023529411764705883, | |
| "loss": 23.1699, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 2.6592000000000002, | |
| "grad_norm": 0.8768156170845032, | |
| "learning_rate": 0.00023101604278074868, | |
| "loss": 23.2747, | |
| "step": 832 | |
| }, | |
| { | |
| "epoch": 2.6656, | |
| "grad_norm": 0.9169402718544006, | |
| "learning_rate": 0.0002267379679144385, | |
| "loss": 23.281, | |
| "step": 834 | |
| }, | |
| { | |
| "epoch": 2.672, | |
| "grad_norm": 0.978992223739624, | |
| "learning_rate": 0.00022245989304812836, | |
| "loss": 23.0263, | |
| "step": 836 | |
| }, | |
| { | |
| "epoch": 2.6784, | |
| "grad_norm": 0.9082220792770386, | |
| "learning_rate": 0.00021818181818181818, | |
| "loss": 23.1189, | |
| "step": 838 | |
| }, | |
| { | |
| "epoch": 2.6848, | |
| "grad_norm": 0.9290631413459778, | |
| "learning_rate": 0.000213903743315508, | |
| "loss": 23.6318, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 2.6912000000000003, | |
| "grad_norm": 0.9007918834686279, | |
| "learning_rate": 0.00020962566844919786, | |
| "loss": 23.0228, | |
| "step": 842 | |
| }, | |
| { | |
| "epoch": 2.6976, | |
| "grad_norm": 0.9045092463493347, | |
| "learning_rate": 0.00020534759358288771, | |
| "loss": 23.1173, | |
| "step": 844 | |
| }, | |
| { | |
| "epoch": 2.7039999999999997, | |
| "grad_norm": 0.8459358215332031, | |
| "learning_rate": 0.00020106951871657757, | |
| "loss": 23.0842, | |
| "step": 846 | |
| }, | |
| { | |
| "epoch": 2.7104, | |
| "grad_norm": 0.8688234686851501, | |
| "learning_rate": 0.00019679144385026736, | |
| "loss": 23.1473, | |
| "step": 848 | |
| }, | |
| { | |
| "epoch": 2.7168, | |
| "grad_norm": 0.8705434203147888, | |
| "learning_rate": 0.00019251336898395722, | |
| "loss": 23.2901, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 2.7232, | |
| "grad_norm": 0.9049879908561707, | |
| "learning_rate": 0.00018823529411764707, | |
| "loss": 23.2844, | |
| "step": 852 | |
| }, | |
| { | |
| "epoch": 2.7296, | |
| "grad_norm": 0.8677183985710144, | |
| "learning_rate": 0.0001839572192513369, | |
| "loss": 23.441, | |
| "step": 854 | |
| }, | |
| { | |
| "epoch": 2.7359999999999998, | |
| "grad_norm": 0.8370922207832336, | |
| "learning_rate": 0.00017967914438502675, | |
| "loss": 23.3627, | |
| "step": 856 | |
| }, | |
| { | |
| "epoch": 2.7424, | |
| "grad_norm": 0.827491819858551, | |
| "learning_rate": 0.0001754010695187166, | |
| "loss": 23.3567, | |
| "step": 858 | |
| }, | |
| { | |
| "epoch": 2.7488, | |
| "grad_norm": 0.8676392436027527, | |
| "learning_rate": 0.0001711229946524064, | |
| "loss": 23.5228, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 2.7552, | |
| "grad_norm": 0.916731059551239, | |
| "learning_rate": 0.00016684491978609625, | |
| "loss": 23.4274, | |
| "step": 862 | |
| }, | |
| { | |
| "epoch": 2.7616, | |
| "grad_norm": 0.8114993572235107, | |
| "learning_rate": 0.0001625668449197861, | |
| "loss": 23.0131, | |
| "step": 864 | |
| }, | |
| { | |
| "epoch": 2.768, | |
| "grad_norm": 0.821692705154419, | |
| "learning_rate": 0.00015828877005347595, | |
| "loss": 22.9853, | |
| "step": 866 | |
| }, | |
| { | |
| "epoch": 2.7744, | |
| "grad_norm": 0.8291884064674377, | |
| "learning_rate": 0.00015401069518716578, | |
| "loss": 22.6708, | |
| "step": 868 | |
| }, | |
| { | |
| "epoch": 2.7808, | |
| "grad_norm": 0.8806215524673462, | |
| "learning_rate": 0.0001497326203208556, | |
| "loss": 23.079, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 2.7872, | |
| "grad_norm": 0.8456304669380188, | |
| "learning_rate": 0.00014545454545454546, | |
| "loss": 23.3899, | |
| "step": 872 | |
| }, | |
| { | |
| "epoch": 2.7936, | |
| "grad_norm": 0.8849140405654907, | |
| "learning_rate": 0.00014117647058823528, | |
| "loss": 23.3624, | |
| "step": 874 | |
| }, | |
| { | |
| "epoch": 2.8, | |
| "grad_norm": 0.8567163944244385, | |
| "learning_rate": 0.00013689839572192513, | |
| "loss": 23.2244, | |
| "step": 876 | |
| }, | |
| { | |
| "epoch": 2.8064, | |
| "grad_norm": 0.886084258556366, | |
| "learning_rate": 0.00013262032085561499, | |
| "loss": 23.275, | |
| "step": 878 | |
| }, | |
| { | |
| "epoch": 2.8128, | |
| "grad_norm": 0.8626444339752197, | |
| "learning_rate": 0.00012834224598930484, | |
| "loss": 23.3534, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 2.8192, | |
| "grad_norm": 0.8719081282615662, | |
| "learning_rate": 0.00012406417112299466, | |
| "loss": 23.7974, | |
| "step": 882 | |
| }, | |
| { | |
| "epoch": 2.8256, | |
| "grad_norm": 0.8361597061157227, | |
| "learning_rate": 0.00011978609625668449, | |
| "loss": 23.2917, | |
| "step": 884 | |
| }, | |
| { | |
| "epoch": 2.832, | |
| "grad_norm": 0.9440580606460571, | |
| "learning_rate": 0.00011550802139037434, | |
| "loss": 23.2307, | |
| "step": 886 | |
| }, | |
| { | |
| "epoch": 2.8384, | |
| "grad_norm": 0.84004145860672, | |
| "learning_rate": 0.00011122994652406418, | |
| "loss": 23.135, | |
| "step": 888 | |
| }, | |
| { | |
| "epoch": 2.8448, | |
| "grad_norm": 0.836331307888031, | |
| "learning_rate": 0.000106951871657754, | |
| "loss": 23.327, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 2.8512, | |
| "grad_norm": 0.9749852418899536, | |
| "learning_rate": 0.00010267379679144386, | |
| "loss": 23.0129, | |
| "step": 892 | |
| }, | |
| { | |
| "epoch": 2.8576, | |
| "grad_norm": 0.902911365032196, | |
| "learning_rate": 9.839572192513368e-05, | |
| "loss": 23.4656, | |
| "step": 894 | |
| }, | |
| { | |
| "epoch": 2.864, | |
| "grad_norm": 0.8584335446357727, | |
| "learning_rate": 9.411764705882353e-05, | |
| "loss": 22.9288, | |
| "step": 896 | |
| }, | |
| { | |
| "epoch": 2.8704, | |
| "grad_norm": 0.8301668167114258, | |
| "learning_rate": 8.983957219251337e-05, | |
| "loss": 23.4383, | |
| "step": 898 | |
| }, | |
| { | |
| "epoch": 2.8768000000000002, | |
| "grad_norm": 0.8426039814949036, | |
| "learning_rate": 8.55614973262032e-05, | |
| "loss": 23.2636, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 2.8832, | |
| "grad_norm": 0.8241990208625793, | |
| "learning_rate": 8.128342245989305e-05, | |
| "loss": 23.4808, | |
| "step": 902 | |
| }, | |
| { | |
| "epoch": 2.8895999999999997, | |
| "grad_norm": 0.8555430769920349, | |
| "learning_rate": 7.700534759358289e-05, | |
| "loss": 22.9983, | |
| "step": 904 | |
| }, | |
| { | |
| "epoch": 2.896, | |
| "grad_norm": 0.8600494265556335, | |
| "learning_rate": 7.272727272727273e-05, | |
| "loss": 23.2584, | |
| "step": 906 | |
| }, | |
| { | |
| "epoch": 2.9024, | |
| "grad_norm": 0.9131312966346741, | |
| "learning_rate": 6.844919786096257e-05, | |
| "loss": 23.4513, | |
| "step": 908 | |
| }, | |
| { | |
| "epoch": 2.9088000000000003, | |
| "grad_norm": 0.8168982267379761, | |
| "learning_rate": 6.417112299465242e-05, | |
| "loss": 23.4352, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 2.9152, | |
| "grad_norm": 0.8447926640510559, | |
| "learning_rate": 5.9893048128342244e-05, | |
| "loss": 23.2975, | |
| "step": 912 | |
| }, | |
| { | |
| "epoch": 2.9215999999999998, | |
| "grad_norm": 0.8652685284614563, | |
| "learning_rate": 5.561497326203209e-05, | |
| "loss": 23.5172, | |
| "step": 914 | |
| }, | |
| { | |
| "epoch": 2.928, | |
| "grad_norm": 0.8298205733299255, | |
| "learning_rate": 5.133689839572193e-05, | |
| "loss": 23.5154, | |
| "step": 916 | |
| }, | |
| { | |
| "epoch": 2.9344, | |
| "grad_norm": 0.8782302141189575, | |
| "learning_rate": 4.705882352941177e-05, | |
| "loss": 23.3597, | |
| "step": 918 | |
| }, | |
| { | |
| "epoch": 2.9408, | |
| "grad_norm": 0.9018718004226685, | |
| "learning_rate": 4.27807486631016e-05, | |
| "loss": 23.601, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 2.9472, | |
| "grad_norm": 0.912187933921814, | |
| "learning_rate": 3.8502673796791445e-05, | |
| "loss": 23.4442, | |
| "step": 922 | |
| }, | |
| { | |
| "epoch": 2.9536, | |
| "grad_norm": 0.8485115170478821, | |
| "learning_rate": 3.4224598930481284e-05, | |
| "loss": 23.0539, | |
| "step": 924 | |
| }, | |
| { | |
| "epoch": 2.96, | |
| "grad_norm": 0.880998432636261, | |
| "learning_rate": 2.9946524064171122e-05, | |
| "loss": 23.1452, | |
| "step": 926 | |
| }, | |
| { | |
| "epoch": 2.9664, | |
| "grad_norm": 0.8453803658485413, | |
| "learning_rate": 2.5668449197860964e-05, | |
| "loss": 22.8998, | |
| "step": 928 | |
| }, | |
| { | |
| "epoch": 2.9728, | |
| "grad_norm": 0.8399450182914734, | |
| "learning_rate": 2.13903743315508e-05, | |
| "loss": 23.0928, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 2.9792, | |
| "grad_norm": 0.8890286684036255, | |
| "learning_rate": 1.7112299465240642e-05, | |
| "loss": 23.0015, | |
| "step": 932 | |
| }, | |
| { | |
| "epoch": 2.9856, | |
| "grad_norm": 0.8455818295478821, | |
| "learning_rate": 1.2834224598930482e-05, | |
| "loss": 23.4294, | |
| "step": 934 | |
| }, | |
| { | |
| "epoch": 2.992, | |
| "grad_norm": 0.8911629915237427, | |
| "learning_rate": 8.556149732620321e-06, | |
| "loss": 23.0343, | |
| "step": 936 | |
| }, | |
| { | |
| "epoch": 2.992, | |
| "step": 936, | |
| "total_flos": 1.015865179865088e+19, | |
| "train_loss": 28.25554455243624, | |
| "train_runtime": 30178.7669, | |
| "train_samples_per_second": 7.953, | |
| "train_steps_per_second": 0.031 | |
| }, | |
| { | |
| "epoch": 2.992, | |
| "eval_loss": 1.4616048336029053, | |
| "eval_runtime": 1115.0715, | |
| "eval_samples_per_second": 8.968, | |
| "eval_steps_per_second": 1.121, | |
| "step": 936 | |
| }, | |
| { | |
| "epoch": 2.992, | |
| "eval_loss": 1.4569849967956543, | |
| "eval_runtime": 1130.2457, | |
| "eval_samples_per_second": 8.848, | |
| "eval_steps_per_second": 1.106, | |
| "step": 936 | |
| } | |
| ], | |
| "logging_steps": 2, | |
| "max_steps": 936, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 3, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.015865179865088e+19, | |
| "train_batch_size": 16, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |