{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.2346368715083798,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.00019972067039106145,
      "loss": 2.6443,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019944134078212292,
      "loss": 2.4104,
      "step": 2
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019916201117318435,
      "loss": 2.4975,
      "step": 3
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019888268156424582,
      "loss": 2.3513,
      "step": 4
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001986033519553073,
      "loss": 2.4274,
      "step": 5
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019832402234636873,
      "loss": 2.3628,
      "step": 6
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001980446927374302,
      "loss": 2.3567,
      "step": 7
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019776536312849163,
      "loss": 2.4121,
      "step": 8
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019748603351955307,
      "loss": 2.4033,
      "step": 9
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019720670391061454,
      "loss": 2.2805,
      "step": 10
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019692737430167598,
      "loss": 2.2639,
      "step": 11
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019664804469273744,
      "loss": 2.2724,
      "step": 12
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019636871508379888,
      "loss": 2.332,
      "step": 13
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019608938547486035,
      "loss": 2.2261,
      "step": 14
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019581005586592182,
      "loss": 2.2208,
      "step": 15
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019553072625698326,
      "loss": 2.3351,
      "step": 16
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001952513966480447,
      "loss": 2.2475,
      "step": 17
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019497206703910616,
      "loss": 2.3283,
      "step": 18
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001946927374301676,
      "loss": 2.1346,
      "step": 19
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019441340782122907,
      "loss": 2.131,
      "step": 20
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001941340782122905,
      "loss": 2.1718,
      "step": 21
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019385474860335195,
      "loss": 2.2446,
      "step": 22
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001935754189944134,
      "loss": 2.306,
      "step": 23
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019329608938547488,
      "loss": 2.1908,
      "step": 24
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019301675977653632,
      "loss": 2.2844,
      "step": 25
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019273743016759779,
      "loss": 2.2235,
      "step": 26
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019245810055865922,
      "loss": 2.1842,
      "step": 27
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019217877094972066,
      "loss": 2.2675,
      "step": 28
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019189944134078213,
      "loss": 2.2532,
      "step": 29
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019162011173184357,
      "loss": 2.1788,
      "step": 30
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019134078212290504,
      "loss": 2.2494,
      "step": 31
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001910614525139665,
      "loss": 2.1995,
      "step": 32
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019078212290502794,
      "loss": 2.1451,
      "step": 33
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001905027932960894,
      "loss": 2.223,
      "step": 34
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00019022346368715085,
      "loss": 2.2854,
      "step": 35
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001899441340782123,
      "loss": 2.2265,
      "step": 36
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018966480446927375,
      "loss": 2.1214,
      "step": 37
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001893854748603352,
      "loss": 2.1898,
      "step": 38
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018910614525139666,
      "loss": 2.1974,
      "step": 39
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001888268156424581,
      "loss": 2.2259,
      "step": 40
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018854748603351957,
      "loss": 2.2094,
      "step": 41
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018826815642458103,
      "loss": 2.1731,
      "step": 42
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018798882681564247,
      "loss": 2.2373,
      "step": 43
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001877094972067039,
      "loss": 2.2295,
      "step": 44
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018743016759776538,
      "loss": 2.1947,
      "step": 45
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018715083798882682,
      "loss": 2.2115,
      "step": 46
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018687150837988828,
      "loss": 2.1224,
      "step": 47
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018659217877094972,
      "loss": 2.2137,
      "step": 48
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018631284916201116,
      "loss": 2.2338,
      "step": 49
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018603351955307266,
      "loss": 2.1298,
      "step": 50
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001857541899441341,
      "loss": 2.0883,
      "step": 51
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018547486033519553,
      "loss": 2.1216,
      "step": 52
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.000185195530726257,
      "loss": 2.2112,
      "step": 53
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00018491620111731844,
      "loss": 2.1224,
      "step": 54
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001846368715083799,
      "loss": 2.2375,
      "step": 55
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00018435754189944135,
      "loss": 2.2235,
      "step": 56
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018407821229050279,
      "loss": 2.1682,
      "step": 57
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018379888268156425,
      "loss": 2.2077,
      "step": 58
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00018351955307262572,
      "loss": 2.1596,
      "step": 59
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00018324022346368716,
      "loss": 2.1311,
      "step": 60
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00018296089385474862,
      "loss": 2.1333,
      "step": 61
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00018268156424581006,
      "loss": 2.0901,
      "step": 62
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00018240223463687153,
      "loss": 2.1971,
      "step": 63
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00018212290502793297,
      "loss": 2.2602,
      "step": 64
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0001818435754189944,
      "loss": 2.2194,
      "step": 65
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00018156424581005588,
      "loss": 2.1218,
      "step": 66
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00018128491620111731,
      "loss": 2.2049,
      "step": 67
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00018100558659217878,
      "loss": 2.1521,
      "step": 68
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00018072625698324025,
      "loss": 2.112,
      "step": 69
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001804469273743017,
      "loss": 2.1906,
      "step": 70
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00018016759776536313,
      "loss": 2.1717,
      "step": 71
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001798882681564246,
      "loss": 2.0712,
      "step": 72
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017960893854748603,
      "loss": 2.141,
      "step": 73
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001793296089385475,
      "loss": 2.0656,
      "step": 74
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017905027932960894,
      "loss": 2.1125,
      "step": 75
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017877094972067038,
      "loss": 2.0869,
      "step": 76
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017849162011173187,
      "loss": 2.2478,
      "step": 77
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001782122905027933,
      "loss": 2.1535,
      "step": 78
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00017793296089385475,
      "loss": 2.1927,
      "step": 79
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017765363128491622,
      "loss": 2.1213,
      "step": 80
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017737430167597766,
      "loss": 2.0981,
      "step": 81
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017709497206703912,
      "loss": 2.1828,
      "step": 82
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017681564245810056,
      "loss": 2.0562,
      "step": 83
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.000176536312849162,
      "loss": 2.1334,
      "step": 84
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00017625698324022347,
      "loss": 2.1225,
      "step": 85
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00017597765363128493,
      "loss": 2.2098,
      "step": 86
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00017569832402234637,
      "loss": 2.1519,
      "step": 87
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00017541899441340784,
      "loss": 2.1132,
      "step": 88
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00017513966480446928,
      "loss": 2.0333,
      "step": 89
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00017486033519553075,
      "loss": 2.2764,
      "step": 90
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00017458100558659218,
      "loss": 2.1838,
      "step": 91
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00017430167597765362,
      "loss": 2.1386,
      "step": 92
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0001740223463687151,
      "loss": 2.1034,
      "step": 93
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00017374301675977656,
      "loss": 2.0346,
      "step": 94
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.000173463687150838,
      "loss": 2.0274,
      "step": 95
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00017318435754189946,
      "loss": 2.1036,
      "step": 96
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0001729050279329609,
      "loss": 2.1208,
      "step": 97
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00017262569832402237,
      "loss": 2.0572,
      "step": 98
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0001723463687150838,
      "loss": 2.1702,
      "step": 99
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00017206703910614525,
      "loss": 2.1302,
      "step": 100
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0001717877094972067,
      "loss": 2.0175,
      "step": 101
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00017150837988826815,
      "loss": 2.1006,
      "step": 102
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017122905027932962,
      "loss": 2.0662,
      "step": 103
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017094972067039109,
      "loss": 1.988,
      "step": 104
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00017067039106145253,
      "loss": 2.1008,
      "step": 105
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00017039106145251396,
      "loss": 2.1482,
      "step": 106
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00017011173184357543,
      "loss": 2.1052,
      "step": 107
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016983240223463687,
      "loss": 2.0978,
      "step": 108
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00016955307262569834,
      "loss": 2.1303,
      "step": 109
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00016927374301675978,
      "loss": 2.0794,
      "step": 110
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00016899441340782122,
      "loss": 2.1059,
      "step": 111
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0001687150837988827,
      "loss": 1.9642,
      "step": 112
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00016843575418994415,
      "loss": 2.0415,
      "step": 113
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001681564245810056,
      "loss": 2.0795,
      "step": 114
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00016787709497206705,
      "loss": 2.0238,
      "step": 115
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0001675977653631285,
      "loss": 2.078,
      "step": 116
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00016731843575418996,
      "loss": 2.1362,
      "step": 117
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0001670391061452514,
      "loss": 2.0552,
      "step": 118
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00016675977653631284,
      "loss": 2.213,
      "step": 119
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0001664804469273743,
      "loss": 1.9999,
      "step": 120
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00016620111731843577,
      "loss": 2.06,
      "step": 121
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0001659217877094972,
      "loss": 2.0177,
      "step": 122
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00016564245810055868,
      "loss": 2.0504,
      "step": 123
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00016536312849162012,
      "loss": 2.0585,
      "step": 124
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00016508379888268158,
      "loss": 2.0273,
      "step": 125
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00016480446927374302,
      "loss": 2.0549,
      "step": 126
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00016452513966480446,
      "loss": 2.044,
      "step": 127
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00016424581005586593,
      "loss": 2.0731,
      "step": 128
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00016396648044692737,
      "loss": 2.0568,
      "step": 129
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00016368715083798883,
      "loss": 2.007,
      "step": 130
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0001634078212290503,
      "loss": 2.0707,
      "step": 131
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00016312849162011174,
      "loss": 1.9793,
      "step": 132
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0001628491620111732,
      "loss": 2.1311,
      "step": 133
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00016256983240223465,
      "loss": 2.0016,
      "step": 134
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00016229050279329609,
      "loss": 1.9945,
      "step": 135
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00016201117318435755,
      "loss": 2.0186,
      "step": 136
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.000161731843575419,
      "loss": 2.0971,
      "step": 137
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00016145251396648046,
      "loss": 2.0883,
      "step": 138
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00016117318435754192,
      "loss": 2.0803,
      "step": 139
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00016089385474860336,
      "loss": 2.0617,
      "step": 140
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00016061452513966483,
      "loss": 2.1265,
      "step": 141
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00016033519553072627,
      "loss": 2.0151,
      "step": 142
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0001600558659217877,
      "loss": 1.996,
      "step": 143
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00015977653631284918,
      "loss": 2.0164,
      "step": 144
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00015949720670391061,
      "loss": 2.0314,
      "step": 145
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00015921787709497208,
      "loss": 1.9501,
      "step": 146
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00015893854748603352,
      "loss": 2.087,
      "step": 147
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.000158659217877095,
      "loss": 2.0262,
      "step": 148
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00015837988826815643,
      "loss": 2.0765,
      "step": 149
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0001581005586592179,
      "loss": 2.105,
      "step": 150
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00015782122905027933,
      "loss": 1.9863,
      "step": 151
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0001575418994413408,
      "loss": 1.9873,
      "step": 152
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00015726256983240224,
      "loss": 2.0094,
      "step": 153
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00015698324022346368,
      "loss": 1.9141,
      "step": 154
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00015670391061452514,
      "loss": 1.917,
      "step": 155
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00015642458100558658,
      "loss": 2.109,
      "step": 156
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00015614525139664805,
      "loss": 1.9799,
      "step": 157
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00015586592178770952,
      "loss": 1.9571,
      "step": 158
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00015558659217877096,
      "loss": 1.9931,
      "step": 159
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00015530726256983242,
      "loss": 2.1004,
      "step": 160
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00015502793296089386,
      "loss": 2.0385,
      "step": 161
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0001547486033519553,
      "loss": 1.9751,
      "step": 162
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00015446927374301677,
      "loss": 2.0544,
      "step": 163
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.0001541899441340782,
      "loss": 2.0069,
      "step": 164
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00015391061452513967,
      "loss": 1.9576,
      "step": 165
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00015363128491620114,
      "loss": 1.8991,
      "step": 166
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00015335195530726258,
      "loss": 1.9336,
      "step": 167
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00015307262569832405,
      "loss": 1.9736,
      "step": 168
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00015279329608938548,
      "loss": 1.9702,
      "step": 169
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00015251396648044692,
      "loss": 1.9055,
      "step": 170
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0001522346368715084,
      "loss": 2.0503,
      "step": 171
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00015195530726256983,
      "loss": 2.0039,
      "step": 172
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0001516759776536313,
      "loss": 1.9406,
      "step": 173
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00015139664804469274,
      "loss": 2.0525,
      "step": 174
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0001511173184357542,
      "loss": 1.9234,
      "step": 175
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00015083798882681567,
      "loss": 1.8614,
      "step": 176
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.0001505586592178771,
      "loss": 1.9616,
      "step": 177
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00015027932960893855,
      "loss": 1.9509,
      "step": 178
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.9592,
      "step": 179
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00014972067039106145,
      "loss": 1.8991,
      "step": 180
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00014944134078212292,
      "loss": 1.9127,
      "step": 181
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00014916201117318436,
      "loss": 1.8982,
      "step": 182
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0001488826815642458,
      "loss": 1.9534,
      "step": 183
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.0001486033519553073,
      "loss": 1.7794,
      "step": 184
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00014832402234636873,
      "loss": 1.7958,
      "step": 185
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00014804469273743017,
      "loss": 1.8282,
      "step": 186
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00014776536312849164,
      "loss": 2.0423,
      "step": 187
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00014748603351955308,
      "loss": 1.9282,
      "step": 188
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00014720670391061454,
      "loss": 1.9072,
      "step": 189
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00014692737430167598,
      "loss": 1.8665,
      "step": 190
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00014664804469273742,
      "loss": 1.9021,
      "step": 191
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0001463687150837989,
      "loss": 1.7308,
      "step": 192
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00014608938547486035,
      "loss": 1.9165,
      "step": 193
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0001458100558659218,
      "loss": 1.842,
      "step": 194
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00014553072625698326,
      "loss": 1.9128,
      "step": 195
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0001452513966480447,
      "loss": 1.8005,
      "step": 196
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00014497206703910614,
      "loss": 1.8547,
      "step": 197
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.0001446927374301676,
      "loss": 1.9042,
      "step": 198
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00014441340782122905,
      "loss": 1.8609,
      "step": 199
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0001441340782122905,
      "loss": 1.9591,
      "step": 200
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00014385474860335195,
      "loss": 1.8722,
      "step": 201
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00014357541899441342,
      "loss": 1.8535,
      "step": 202
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00014329608938547488,
      "loss": 1.8676,
      "step": 203
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00014301675977653632,
      "loss": 1.8976,
      "step": 204
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00014273743016759776,
      "loss": 1.7723,
      "step": 205
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00014245810055865923,
      "loss": 1.8165,
      "step": 206
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00014217877094972067,
      "loss": 1.7811,
      "step": 207
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00014189944134078214,
      "loss": 1.908,
      "step": 208
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00014162011173184357,
      "loss": 1.7663,
      "step": 209
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00014134078212290501,
      "loss": 1.6779,
      "step": 210
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.0001410614525139665,
      "loss": 1.9039,
      "step": 211
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00014078212290502795,
      "loss": 1.8033,
      "step": 212
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00014050279329608939,
      "loss": 1.8251,
      "step": 213
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00014022346368715085,
      "loss": 1.8505,
      "step": 214
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0001399441340782123,
      "loss": 1.8147,
      "step": 215
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00013966480446927376,
      "loss": 1.8419,
      "step": 216
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.0001393854748603352,
      "loss": 1.8401,
      "step": 217
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00013910614525139664,
      "loss": 1.8912,
      "step": 218
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.0001388268156424581,
      "loss": 1.7548,
      "step": 219
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00013854748603351957,
      "loss": 1.8741,
      "step": 220
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.000138268156424581,
      "loss": 1.9549,
      "step": 221
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.00013798882681564248,
      "loss": 1.9093,
      "step": 222
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00013770949720670392,
      "loss": 1.7896,
      "step": 223
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00013743016759776538,
      "loss": 1.8491,
      "step": 224
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00013715083798882682,
      "loss": 1.7851,
      "step": 225
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00013687150837988826,
      "loss": 1.6992,
      "step": 226
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00013659217877094973,
      "loss": 1.9765,
      "step": 227
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00013631284916201117,
      "loss": 1.8179,
      "step": 228
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00013603351955307263,
      "loss": 1.8548,
      "step": 229
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0001357541899441341,
      "loss": 1.8843,
      "step": 230
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.00013547486033519554,
      "loss": 1.9105,
      "step": 231
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00013519553072625698,
      "loss": 1.8748,
      "step": 232
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00013491620111731844,
      "loss": 1.7976,
      "step": 233
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00013463687150837988,
      "loss": 1.7369,
      "step": 234
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00013435754189944135,
      "loss": 1.7808,
      "step": 235
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.0001340782122905028,
      "loss": 1.8385,
      "step": 236
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00013379888268156423,
      "loss": 1.8295,
      "step": 237
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00013351955307262572,
      "loss": 1.757,
      "step": 238
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00013324022346368716,
      "loss": 1.7904,
      "step": 239
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.0001329608938547486,
      "loss": 1.7632,
      "step": 240
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00013268156424581007,
      "loss": 1.7867,
      "step": 241
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.0001324022346368715,
      "loss": 1.8259,
      "step": 242
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00013212290502793297,
      "loss": 1.6655,
      "step": 243
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.0001318435754189944,
      "loss": 1.6848,
      "step": 244
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.00013156424581005585,
      "loss": 1.7931,
      "step": 245
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.00013128491620111732,
      "loss": 1.7868,
      "step": 246
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00013100558659217879,
      "loss": 1.7732,
      "step": 247
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00013072625698324022,
      "loss": 1.7851,
      "step": 248
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.0001304469273743017,
      "loss": 1.7406,
      "step": 249
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00013016759776536313,
      "loss": 1.5853,
      "step": 250
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.0001298882681564246,
      "loss": 1.8271,
      "step": 251
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00012960893854748604,
      "loss": 1.6054,
      "step": 252
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00012932960893854748,
      "loss": 1.6884,
      "step": 253
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00012905027932960894,
      "loss": 1.7333,
      "step": 254
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00012877094972067038,
      "loss": 1.803,
      "step": 255
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.00012849162011173185,
      "loss": 1.63,
      "step": 256
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00012821229050279331,
      "loss": 1.8023,
      "step": 257
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00012793296089385475,
      "loss": 1.762,
      "step": 258
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00012765363128491622,
      "loss": 1.7565,
      "step": 259
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00012737430167597766,
      "loss": 1.5937,
      "step": 260
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0001270949720670391,
      "loss": 1.7208,
      "step": 261
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.00012681564245810057,
      "loss": 1.8097,
      "step": 262
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.000126536312849162,
      "loss": 1.7042,
      "step": 263
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00012625698324022347,
      "loss": 1.7892,
      "step": 264
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.00012597765363128494,
      "loss": 1.752,
      "step": 265
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00012569832402234638,
      "loss": 1.7333,
      "step": 266
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00012541899441340784,
      "loss": 1.7492,
      "step": 267
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.00012513966480446928,
      "loss": 1.818,
      "step": 268
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.00012486033519553072,
      "loss": 1.7146,
      "step": 269
    },
    {
      "epoch": 1.51,
      "learning_rate": 0.0001245810055865922,
      "loss": 1.7958,
      "step": 270
    },
    {
      "epoch": 1.51,
      "learning_rate": 0.00012430167597765363,
      "loss": 1.7212,
      "step": 271
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.0001240223463687151,
      "loss": 1.8835,
      "step": 272
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.00012374301675977656,
      "loss": 1.7486,
      "step": 273
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.000123463687150838,
      "loss": 1.8103,
      "step": 274
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.00012318435754189944,
      "loss": 1.7557,
      "step": 275
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.0001229050279329609,
      "loss": 1.6198,
      "step": 276
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00012262569832402235,
      "loss": 1.6971,
      "step": 277
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.0001223463687150838,
      "loss": 1.668,
      "step": 278
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.00012206703910614525,
      "loss": 1.8795,
      "step": 279
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0001217877094972067,
      "loss": 1.6412,
      "step": 280
    },
    {
      "epoch": 1.57,
      "learning_rate": 0.00012150837988826816,
      "loss": 1.7497,
      "step": 281
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00012122905027932962,
      "loss": 1.5577,
      "step": 282
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00012094972067039108,
      "loss": 1.8049,
      "step": 283
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.00012067039106145253,
      "loss": 1.6834,
      "step": 284
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.00012039106145251397,
      "loss": 1.7978,
      "step": 285
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00012011173184357542,
      "loss": 1.6558,
      "step": 286
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00011983240223463687,
      "loss": 1.7561,
      "step": 287
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.00011955307262569833,
      "loss": 1.6807,
      "step": 288
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.00011927374301675978,
      "loss": 1.6937,
      "step": 289
    },
    {
      "epoch": 1.62,
      "learning_rate": 0.00011899441340782122,
      "loss": 1.6591,
      "step": 290
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.0001187150837988827,
      "loss": 1.6771,
      "step": 291
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00011843575418994415,
      "loss": 1.7743,
      "step": 292
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.00011815642458100559,
      "loss": 1.5857,
      "step": 293
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.00011787709497206705,
      "loss": 1.6999,
      "step": 294
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.0001175977653631285,
      "loss": 1.5661,
      "step": 295
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.00011731843575418995,
      "loss": 1.7235,
      "step": 296
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.0001170391061452514,
      "loss": 1.607,
      "step": 297
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00011675977653631284,
      "loss": 1.68,
      "step": 298
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.0001164804469273743,
      "loss": 1.6938,
      "step": 299
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00011620111731843578,
      "loss": 1.6315,
      "step": 300
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00011592178770949722,
      "loss": 1.6802,
      "step": 301
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.00011564245810055867,
      "loss": 1.7174,
      "step": 302
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.00011536312849162012,
      "loss": 1.5212,
      "step": 303
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00011508379888268157,
      "loss": 1.5808,
      "step": 304
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00011480446927374303,
      "loss": 1.6152,
      "step": 305
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00011452513966480447,
      "loss": 1.5435,
      "step": 306
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00011424581005586592,
      "loss": 1.6603,
      "step": 307
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00011396648044692737,
      "loss": 1.685,
      "step": 308
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.00011368715083798884,
      "loss": 1.6002,
      "step": 309
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.00011340782122905029,
      "loss": 1.6046,
      "step": 310
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00011312849162011174,
      "loss": 1.5969,
      "step": 311
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.0001128491620111732,
      "loss": 1.5845,
      "step": 312
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.00011256983240223464,
      "loss": 1.8183,
      "step": 313
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.00011229050279329609,
      "loss": 1.6953,
      "step": 314
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00011201117318435754,
      "loss": 1.7787,
      "step": 315
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.000111731843575419,
      "loss": 1.6422,
      "step": 316
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.00011145251396648045,
      "loss": 1.7034,
      "step": 317
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00011117318435754192,
      "loss": 1.7301,
      "step": 318
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00011089385474860337,
      "loss": 1.7084,
      "step": 319
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00011061452513966482,
      "loss": 1.772,
      "step": 320
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00011033519553072626,
      "loss": 1.5733,
      "step": 321
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00011005586592178771,
      "loss": 1.6423,
      "step": 322
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00010977653631284917,
      "loss": 1.5809,
      "step": 323
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.00010949720670391062,
      "loss": 1.6781,
      "step": 324
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00010921787709497207,
      "loss": 1.6788,
      "step": 325
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00010893854748603351,
      "loss": 1.6346,
      "step": 326
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00010865921787709499,
      "loss": 1.6634,
      "step": 327
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00010837988826815643,
      "loss": 1.7561,
      "step": 328
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.00010810055865921788,
      "loss": 1.66,
      "step": 329
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.00010782122905027934,
      "loss": 1.7298,
      "step": 330
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00010754189944134079,
      "loss": 1.6893,
      "step": 331
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00010726256983240224,
      "loss": 1.7631,
      "step": 332
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00010698324022346368,
      "loss": 1.6633,
      "step": 333
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.00010670391061452513,
      "loss": 1.5388,
      "step": 334
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.00010642458100558659,
      "loss": 1.6718,
      "step": 335
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00010614525139664805,
      "loss": 1.5536,
      "step": 336
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00010586592178770951,
      "loss": 1.6483,
      "step": 337
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.00010558659217877096,
      "loss": 1.5774,
      "step": 338
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.00010530726256983241,
      "loss": 1.6366,
      "step": 339
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.00010502793296089387,
      "loss": 1.5567,
      "step": 340
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.0001047486033519553,
      "loss": 1.5323,
      "step": 341
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00010446927374301676,
      "loss": 1.4608,
      "step": 342
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00010418994413407821,
      "loss": 1.5933,
      "step": 343
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00010391061452513966,
      "loss": 1.6625,
      "step": 344
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00010363128491620113,
      "loss": 1.7236,
      "step": 345
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00010335195530726258,
      "loss": 1.759,
      "step": 346
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00010307262569832404,
      "loss": 1.7248,
      "step": 347
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00010279329608938548,
      "loss": 1.5144,
      "step": 348
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.00010251396648044693,
      "loss": 1.6905,
      "step": 349
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00010223463687150838,
      "loss": 1.6119,
      "step": 350
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00010195530726256983,
      "loss": 1.5464,
      "step": 351
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.00010167597765363129,
      "loss": 1.6901,
      "step": 352
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.00010139664804469273,
      "loss": 1.3511,
      "step": 353
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.0001011173184357542,
      "loss": 1.5434,
      "step": 354
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00010083798882681566,
      "loss": 1.5891,
      "step": 355
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.0001005586592178771,
      "loss": 1.6658,
      "step": 356
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.00010027932960893855,
      "loss": 1.5657,
      "step": 357
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0001,
      "loss": 1.7005,
      "step": 358
    },
    {
      "epoch": 2.01,
      "learning_rate": 9.972067039106146e-05,
      "loss": 1.4202,
      "step": 359
    },
    {
      "epoch": 2.01,
      "learning_rate": 9.944134078212291e-05,
      "loss": 1.5262,
      "step": 360
    },
    {
      "epoch": 2.02,
      "learning_rate": 9.916201117318436e-05,
      "loss": 1.6323,
      "step": 361
    },
    {
      "epoch": 2.02,
      "learning_rate": 9.888268156424582e-05,
      "loss": 1.5521,
      "step": 362
    },
    {
      "epoch": 2.03,
      "learning_rate": 9.860335195530727e-05,
      "loss": 1.5762,
      "step": 363
    },
    {
      "epoch": 2.03,
      "learning_rate": 9.832402234636872e-05,
      "loss": 1.613,
      "step": 364
    },
    {
      "epoch": 2.04,
      "learning_rate": 9.804469273743018e-05,
      "loss": 1.4231,
      "step": 365
    },
    {
      "epoch": 2.04,
      "learning_rate": 9.776536312849163e-05,
      "loss": 1.5706,
      "step": 366
    },
    {
      "epoch": 2.05,
      "learning_rate": 9.748603351955308e-05,
      "loss": 1.5245,
      "step": 367
    },
    {
      "epoch": 2.06,
      "learning_rate": 9.720670391061453e-05,
      "loss": 1.4771,
      "step": 368
    },
    {
      "epoch": 2.06,
      "learning_rate": 9.692737430167597e-05,
      "loss": 1.596,
      "step": 369
    },
    {
      "epoch": 2.07,
      "learning_rate": 9.664804469273744e-05,
      "loss": 1.537,
      "step": 370
    },
    {
      "epoch": 2.07,
      "learning_rate": 9.636871508379889e-05,
      "loss": 1.4276,
      "step": 371
    },
    {
      "epoch": 2.08,
      "learning_rate": 9.608938547486033e-05,
      "loss": 1.4746,
      "step": 372
    },
    {
      "epoch": 2.08,
      "learning_rate": 9.581005586592178e-05,
      "loss": 1.4374,
      "step": 373
    },
    {
      "epoch": 2.09,
      "learning_rate": 9.553072625698325e-05,
      "loss": 1.4704,
      "step": 374
    },
    {
      "epoch": 2.09,
      "learning_rate": 9.52513966480447e-05,
      "loss": 1.5997,
      "step": 375
    },
    {
      "epoch": 2.1,
      "learning_rate": 9.497206703910614e-05,
      "loss": 1.5034,
      "step": 376
    },
    {
      "epoch": 2.11,
      "learning_rate": 9.46927374301676e-05,
      "loss": 1.6392,
      "step": 377
    },
    {
      "epoch": 2.11,
      "learning_rate": 9.441340782122905e-05,
      "loss": 1.5611,
      "step": 378
    },
    {
      "epoch": 2.12,
      "learning_rate": 9.413407821229052e-05,
      "loss": 1.3384,
      "step": 379
    },
    {
      "epoch": 2.12,
      "learning_rate": 9.385474860335196e-05,
      "loss": 1.5732,
      "step": 380
    },
    {
      "epoch": 2.13,
      "learning_rate": 9.357541899441341e-05,
      "loss": 1.3874,
      "step": 381
    },
    {
      "epoch": 2.13,
      "learning_rate": 9.329608938547486e-05,
      "loss": 1.4139,
      "step": 382
    },
    {
      "epoch": 2.14,
      "learning_rate": 9.301675977653633e-05,
      "loss": 1.5195,
      "step": 383
    },
    {
      "epoch": 2.15,
      "learning_rate": 9.273743016759777e-05,
      "loss": 1.4371,
      "step": 384
    },
    {
      "epoch": 2.15,
      "learning_rate": 9.245810055865922e-05,
      "loss": 1.4411,
      "step": 385
    },
    {
      "epoch": 2.16,
      "learning_rate": 9.217877094972067e-05,
      "loss": 1.5214,
      "step": 386
    },
    {
      "epoch": 2.16,
      "learning_rate": 9.189944134078213e-05,
      "loss": 1.4971,
      "step": 387
    },
    {
      "epoch": 2.17,
      "learning_rate": 9.162011173184358e-05,
      "loss": 1.3453,
      "step": 388
    },
    {
      "epoch": 2.17,
      "learning_rate": 9.134078212290503e-05,
      "loss": 1.394,
      "step": 389
    },
    {
      "epoch": 2.18,
      "learning_rate": 9.106145251396648e-05,
      "loss": 1.5058,
      "step": 390
    },
    {
      "epoch": 2.18,
      "learning_rate": 9.078212290502794e-05,
      "loss": 1.4855,
      "step": 391
    },
    {
      "epoch": 2.19,
      "learning_rate": 9.050279329608939e-05,
      "loss": 1.4647,
      "step": 392
    },
    {
      "epoch": 2.2,
      "learning_rate": 9.022346368715084e-05,
      "loss": 1.3435,
      "step": 393
    },
    {
      "epoch": 2.2,
      "learning_rate": 8.99441340782123e-05,
      "loss": 1.5815,
      "step": 394
    },
    {
      "epoch": 2.21,
      "learning_rate": 8.966480446927375e-05,
      "loss": 1.4742,
      "step": 395
    },
    {
      "epoch": 2.21,
      "learning_rate": 8.938547486033519e-05,
      "loss": 1.5389,
      "step": 396
    },
    {
      "epoch": 2.22,
      "learning_rate": 8.910614525139666e-05,
      "loss": 1.384,
      "step": 397
    },
    {
      "epoch": 2.22,
      "learning_rate": 8.882681564245811e-05,
      "loss": 1.3967,
      "step": 398
    },
    {
      "epoch": 2.23,
      "learning_rate": 8.854748603351956e-05,
      "loss": 1.442,
      "step": 399
    },
    {
      "epoch": 2.23,
      "learning_rate": 8.8268156424581e-05,
      "loss": 1.396,
      "step": 400
    }
  ],
  "logging_steps": 1,
  "max_steps": 716,
  "num_train_epochs": 4,
  "save_steps": 100,
  "total_flos": 2.056923590572032e+17,
  "trial_name": null,
  "trial_params": null
}