{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5586592178770949,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.00019972067039106145,
      "loss": 2.6443,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019944134078212292,
      "loss": 2.4104,
      "step": 2
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019916201117318435,
      "loss": 2.4975,
      "step": 3
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019888268156424582,
      "loss": 2.3513,
      "step": 4
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001986033519553073,
      "loss": 2.4274,
      "step": 5
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019832402234636873,
      "loss": 2.3628,
      "step": 6
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001980446927374302,
      "loss": 2.3567,
      "step": 7
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019776536312849163,
      "loss": 2.4121,
      "step": 8
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019748603351955307,
      "loss": 2.4033,
      "step": 9
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019720670391061454,
      "loss": 2.2805,
      "step": 10
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019692737430167598,
      "loss": 2.2639,
      "step": 11
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019664804469273744,
      "loss": 2.2724,
      "step": 12
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019636871508379888,
      "loss": 2.332,
      "step": 13
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019608938547486035,
      "loss": 2.2261,
      "step": 14
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019581005586592182,
      "loss": 2.2208,
      "step": 15
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019553072625698326,
      "loss": 2.3351,
      "step": 16
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001952513966480447,
      "loss": 2.2475,
      "step": 17
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019497206703910616,
      "loss": 2.3283,
      "step": 18
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001946927374301676,
      "loss": 2.1346,
      "step": 19
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019441340782122907,
      "loss": 2.131,
      "step": 20
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001941340782122905,
      "loss": 2.1718,
      "step": 21
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019385474860335195,
      "loss": 2.2446,
      "step": 22
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001935754189944134,
      "loss": 2.306,
      "step": 23
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019329608938547488,
      "loss": 2.1908,
      "step": 24
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019301675977653632,
      "loss": 2.2844,
      "step": 25
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019273743016759779,
      "loss": 2.2235,
      "step": 26
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019245810055865922,
      "loss": 2.1842,
      "step": 27
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019217877094972066,
      "loss": 2.2675,
      "step": 28
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019189944134078213,
      "loss": 2.2532,
      "step": 29
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019162011173184357,
      "loss": 2.1788,
      "step": 30
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019134078212290504,
      "loss": 2.2494,
      "step": 31
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001910614525139665,
      "loss": 2.1995,
      "step": 32
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019078212290502794,
      "loss": 2.1451,
      "step": 33
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001905027932960894,
      "loss": 2.223,
      "step": 34
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00019022346368715085,
      "loss": 2.2854,
      "step": 35
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001899441340782123,
      "loss": 2.2265,
      "step": 36
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018966480446927375,
      "loss": 2.1214,
      "step": 37
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001893854748603352,
      "loss": 2.1898,
      "step": 38
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018910614525139666,
      "loss": 2.1974,
      "step": 39
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001888268156424581,
      "loss": 2.2259,
      "step": 40
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018854748603351957,
      "loss": 2.2094,
      "step": 41
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018826815642458103,
      "loss": 2.1731,
      "step": 42
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018798882681564247,
      "loss": 2.2373,
      "step": 43
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001877094972067039,
      "loss": 2.2295,
      "step": 44
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018743016759776538,
      "loss": 2.1947,
      "step": 45
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018715083798882682,
      "loss": 2.2115,
      "step": 46
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018687150837988828,
      "loss": 2.1224,
      "step": 47
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018659217877094972,
      "loss": 2.2137,
      "step": 48
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018631284916201116,
      "loss": 2.2338,
      "step": 49
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018603351955307266,
      "loss": 2.1298,
      "step": 50
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001857541899441341,
      "loss": 2.0883,
      "step": 51
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018547486033519553,
      "loss": 2.1216,
      "step": 52
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.000185195530726257,
      "loss": 2.2112,
      "step": 53
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00018491620111731844,
      "loss": 2.1224,
      "step": 54
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001846368715083799,
      "loss": 2.2375,
      "step": 55
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00018435754189944135,
      "loss": 2.2235,
      "step": 56
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018407821229050279,
      "loss": 2.1682,
      "step": 57
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018379888268156425,
      "loss": 2.2077,
      "step": 58
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00018351955307262572,
      "loss": 2.1596,
      "step": 59
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00018324022346368716,
      "loss": 2.1311,
      "step": 60
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00018296089385474862,
      "loss": 2.1333,
      "step": 61
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00018268156424581006,
      "loss": 2.0901,
      "step": 62
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00018240223463687153,
      "loss": 2.1971,
      "step": 63
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00018212290502793297,
      "loss": 2.2602,
      "step": 64
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0001818435754189944,
      "loss": 2.2194,
      "step": 65
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00018156424581005588,
      "loss": 2.1218,
      "step": 66
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00018128491620111731,
      "loss": 2.2049,
      "step": 67
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00018100558659217878,
      "loss": 2.1521,
      "step": 68
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00018072625698324025,
      "loss": 2.112,
      "step": 69
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001804469273743017,
      "loss": 2.1906,
      "step": 70
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00018016759776536313,
      "loss": 2.1717,
      "step": 71
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001798882681564246,
      "loss": 2.0712,
      "step": 72
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017960893854748603,
      "loss": 2.141,
      "step": 73
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001793296089385475,
      "loss": 2.0656,
      "step": 74
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017905027932960894,
      "loss": 2.1125,
      "step": 75
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017877094972067038,
      "loss": 2.0869,
      "step": 76
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017849162011173187,
      "loss": 2.2478,
      "step": 77
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001782122905027933,
      "loss": 2.1535,
      "step": 78
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00017793296089385475,
      "loss": 2.1927,
      "step": 79
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017765363128491622,
      "loss": 2.1213,
      "step": 80
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017737430167597766,
      "loss": 2.0981,
      "step": 81
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017709497206703912,
      "loss": 2.1828,
      "step": 82
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017681564245810056,
      "loss": 2.0562,
      "step": 83
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.000176536312849162,
      "loss": 2.1334,
      "step": 84
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00017625698324022347,
      "loss": 2.1225,
      "step": 85
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00017597765363128493,
      "loss": 2.2098,
      "step": 86
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00017569832402234637,
      "loss": 2.1519,
      "step": 87
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00017541899441340784,
      "loss": 2.1132,
      "step": 88
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00017513966480446928,
      "loss": 2.0333,
      "step": 89
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00017486033519553075,
      "loss": 2.2764,
      "step": 90
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00017458100558659218,
      "loss": 2.1838,
      "step": 91
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00017430167597765362,
      "loss": 2.1386,
      "step": 92
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0001740223463687151,
      "loss": 2.1034,
      "step": 93
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00017374301675977656,
      "loss": 2.0346,
      "step": 94
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.000173463687150838,
      "loss": 2.0274,
      "step": 95
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00017318435754189946,
      "loss": 2.1036,
      "step": 96
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0001729050279329609,
      "loss": 2.1208,
      "step": 97
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00017262569832402237,
      "loss": 2.0572,
      "step": 98
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0001723463687150838,
      "loss": 2.1702,
      "step": 99
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00017206703910614525,
      "loss": 2.1302,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 716,
  "num_train_epochs": 4,
  "save_steps": 100,
  "total_flos": 5.117136720224256e+16,
  "trial_name": null,
  "trial_params": null
}