{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.7210884353741496,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0,
      "loss": 1.1564,
      "step": 2
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0,
      "loss": 1.4501,
      "step": 4
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0,
      "loss": 0.9663,
      "step": 6
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0,
      "loss": 1.1779,
      "step": 8
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0,
      "loss": 1.0604,
      "step": 10
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0,
      "loss": 0.9114,
      "step": 12
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0,
      "loss": 1.0516,
      "step": 14
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0,
      "loss": 1.2253,
      "step": 16
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 0.9658,
      "step": 18
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.666666666666667e-06,
      "loss": 0.9407,
      "step": 20
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.9526,
      "step": 22
    },
    {
      "epoch": 0.33,
      "learning_rate": 5.333333333333334e-06,
      "loss": 0.8532,
      "step": 24
    },
    {
      "epoch": 0.35,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.9836,
      "step": 26
    },
    {
      "epoch": 0.38,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.6827,
      "step": 28
    },
    {
      "epoch": 0.41,
      "learning_rate": 9.333333333333334e-06,
      "loss": 1.0519,
      "step": 30
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.0666666666666667e-05,
      "loss": 0.7011,
      "step": 32
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.2e-05,
      "loss": 0.8513,
      "step": 34
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.781,
      "step": 36
    },
    {
      "epoch": 0.52,
      "learning_rate": 1.4666666666666666e-05,
      "loss": 0.7113,
      "step": 38
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.791,
      "step": 40
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.7333333333333336e-05,
      "loss": 0.7299,
      "step": 42
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.866666666666667e-05,
      "loss": 0.7265,
      "step": 44
    },
    {
      "epoch": 0.63,
      "learning_rate": 2e-05,
      "loss": 0.7213,
      "step": 46
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.999447456932676e-05,
      "loss": 0.7056,
      "step": 48
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.997790438338385e-05,
      "loss": 0.7687,
      "step": 50
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.9950307753654016e-05,
      "loss": 0.6776,
      "step": 52
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.991171517679013e-05,
      "loss": 0.6322,
      "step": 54
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.9862169300913784e-05,
      "loss": 0.7253,
      "step": 56
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.9801724878485438e-05,
      "loss": 0.6503,
      "step": 58
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.973044870579824e-05,
      "loss": 0.7397,
      "step": 60
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.964841954916235e-05,
      "loss": 0.8342,
      "step": 62
    },
    {
      "epoch": 0.87,
      "learning_rate": 1.955572805786141e-05,
      "loss": 0.7061,
      "step": 64
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.945247666397725e-05,
      "loss": 0.6767,
      "step": 66
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.9338779469193638e-05,
      "loss": 0.7581,
      "step": 68
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.921476211870408e-05,
      "loss": 0.6546,
      "step": 70
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.908056166236305e-05,
      "loss": 0.8286,
      "step": 72
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.8936326403234125e-05,
      "loss": 0.8036,
      "step": 74
    },
    {
      "epoch": 1.03,
      "learning_rate": 1.8782215733702286e-05,
      "loss": 0.7079,
      "step": 76
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.8618399959331642e-05,
      "loss": 0.6375,
      "step": 78
    },
    {
      "epoch": 1.09,
      "learning_rate": 1.844506011066308e-05,
      "loss": 0.4525,
      "step": 80
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.826238774315995e-05,
      "loss": 0.5356,
      "step": 82
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.8070584725522763e-05,
      "loss": 0.536,
      "step": 84
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.7869863016606893e-05,
      "loss": 0.6215,
      "step": 86
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.4808,
      "step": 88
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.7442560394846518e-05,
      "loss": 0.5008,
      "step": 90
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.7216451688204623e-05,
      "loss": 0.6458,
      "step": 92
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.698236818086073e-05,
      "loss": 0.5105,
      "step": 94
    },
    {
      "epoch": 1.31,
      "learning_rate": 1.6740568555253153e-05,
      "loss": 0.4312,
      "step": 96
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.649132002079552e-05,
      "loss": 0.5643,
      "step": 98
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.6234898018587336e-05,
      "loss": 0.6065,
      "step": 100
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.5971585917027864e-05,
      "loss": 0.4018,
      "step": 102
    },
    {
      "epoch": 1.41,
      "learning_rate": 1.570167469866962e-05,
      "loss": 0.496,
      "step": 104
    },
    {
      "epoch": 1.44,
      "learning_rate": 1.5425462638657597e-05,
      "loss": 0.429,
      "step": 106
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.5143254975109538e-05,
      "loss": 0.6738,
      "step": 108
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.4855363571801523e-05,
      "loss": 0.5518,
      "step": 110
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.4562106573531632e-05,
      "loss": 0.5336,
      "step": 112
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.4263808054542541e-05,
      "loss": 0.6872,
      "step": 114
    },
    {
      "epoch": 1.58,
      "learning_rate": 1.396079766039157e-05,
      "loss": 0.5226,
      "step": 116
    },
    {
      "epoch": 1.61,
      "learning_rate": 1.3653410243663953e-05,
      "loss": 0.6211,
      "step": 118
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.3341985493931877e-05,
      "loss": 0.5429,
      "step": 120
    },
    {
      "epoch": 1.66,
      "learning_rate": 1.3026867562368262e-05,
      "loss": 0.6971,
      "step": 122
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.2708404681430054e-05,
      "loss": 0.5812,
      "step": 124
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.238694878003138e-05,
      "loss": 0.4746,
      "step": 126
    },
    {
      "epoch": 1.74,
      "learning_rate": 1.2062855094631777e-05,
      "loss": 0.4972,
      "step": 128
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.4216,
      "step": 130
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.1408189496772369e-05,
      "loss": 0.5295,
      "step": 132
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.1078341046187588e-05,
      "loss": 0.4123,
      "step": 134
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.0747300935864245e-05,
      "loss": 0.5449,
      "step": 136
    },
    {
      "epoch": 1.88,
      "learning_rate": 1.0415434993638269e-05,
      "loss": 0.5406,
      "step": 138
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.0083109959960974e-05,
      "loss": 0.553,
      "step": 140
    },
    {
      "epoch": 1.93,
      "learning_rate": 9.750693082619274e-06,
      "loss": 0.4892,
      "step": 142
    },
    {
      "epoch": 1.96,
      "learning_rate": 9.418551710895243e-06,
      "loss": 0.5435,
      "step": 144
    },
    {
      "epoch": 1.99,
      "learning_rate": 9.087052889613519e-06,
      "loss": 0.4862,
      "step": 146
    },
    {
      "epoch": 2.01,
      "learning_rate": 8.756562953525151e-06,
      "loss": 0.4767,
      "step": 148
    },
    {
      "epoch": 2.04,
      "learning_rate": 8.427447122476148e-06,
      "loss": 0.3959,
      "step": 150
    },
    {
      "epoch": 2.07,
      "learning_rate": 8.100069097808103e-06,
      "loss": 0.3562,
      "step": 152
    },
    {
      "epoch": 2.1,
      "learning_rate": 7.774790660436857e-06,
      "loss": 0.3327,
      "step": 154
    },
    {
      "epoch": 2.12,
      "learning_rate": 7.451971271053455e-06,
      "loss": 0.352,
      "step": 156
    },
    {
      "epoch": 2.15,
      "learning_rate": 7.131967672889101e-06,
      "loss": 0.36,
      "step": 158
    },
    {
      "epoch": 2.18,
      "learning_rate": 6.815133497483157e-06,
      "loss": 0.391,
      "step": 160
    },
    {
      "epoch": 2.2,
      "learning_rate": 6.501818873889856e-06,
      "loss": 0.3324,
      "step": 162
    },
    {
      "epoch": 2.23,
      "learning_rate": 6.192370041755505e-06,
      "loss": 0.2525,
      "step": 164
    },
    {
      "epoch": 2.26,
      "learning_rate": 5.887128968693887e-06,
      "loss": 0.2986,
      "step": 166
    },
    {
      "epoch": 2.29,
      "learning_rate": 5.586432972382561e-06,
      "loss": 0.3165,
      "step": 168
    },
    {
      "epoch": 2.31,
      "learning_rate": 5.290614347797802e-06,
      "loss": 0.3816,
      "step": 170
    },
    {
      "epoch": 2.34,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.3999,
      "step": 172
    },
    {
      "epoch": 2.37,
      "learning_rate": 4.714911082875446e-06,
      "loss": 0.3504,
      "step": 174
    },
    {
      "epoch": 2.39,
      "learning_rate": 4.435662644233594e-06,
      "loss": 0.3623,
      "step": 176
    },
    {
      "epoch": 2.42,
      "learning_rate": 4.162563277652104e-06,
      "loss": 0.4049,
      "step": 178
    },
    {
      "epoch": 2.45,
      "learning_rate": 3.89591478145437e-06,
      "loss": 0.3554,
      "step": 180
    },
    {
      "epoch": 2.48,
      "learning_rate": 3.636011825196365e-06,
      "loss": 0.4636,
      "step": 182
    },
    {
      "epoch": 2.5,
      "learning_rate": 3.3831416240314085e-06,
      "loss": 0.2884,
      "step": 184
    },
    {
      "epoch": 2.53,
      "learning_rate": 3.1375836213126653e-06,
      "loss": 0.2063,
      "step": 186
    },
    {
      "epoch": 2.56,
      "learning_rate": 2.8996091797841976e-06,
      "loss": 0.4578,
      "step": 188
    },
    {
      "epoch": 2.59,
      "learning_rate": 2.669481281701739e-06,
      "loss": 0.2891,
      "step": 190
    },
    {
      "epoch": 2.61,
      "learning_rate": 2.447454238214654e-06,
      "loss": 0.3871,
      "step": 192
    },
    {
      "epoch": 2.64,
      "learning_rate": 2.2337734083302164e-06,
      "loss": 0.3701,
      "step": 194
    },
    {
      "epoch": 2.67,
      "learning_rate": 2.0286749277707783e-06,
      "loss": 0.236,
      "step": 196
    },
    {
      "epoch": 2.69,
      "learning_rate": 1.8323854480234348e-06,
      "loss": 0.2797,
      "step": 198
    },
    {
      "epoch": 2.72,
      "learning_rate": 1.6451218858706374e-06,
      "loss": 0.2397,
      "step": 200
    }
  ],
  "max_steps": 219,
  "num_train_epochs": 3,
  "total_flos": 1906858253312.0,
  "trial_name": null,
  "trial_params": null
}