{
  "best_metric": 1.9499354362487793,
  "best_model_checkpoint": "output/john-lennon/checkpoint-506",
  "epoch": 2.0,
  "global_step": 506,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.00013706782378281954,
      "loss": 3.1396,
      "step": 5
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001366718044768288,
      "loss": 2.944,
      "step": 10
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00013601346815589937,
      "loss": 2.9219,
      "step": 15
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00013509535174144625,
      "loss": 2.7004,
      "step": 20
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00013392099322631472,
      "loss": 2.7631,
      "step": 25
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001324949180410046,
      "loss": 2.7551,
      "step": 30
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001308226216147698,
      "loss": 2.7797,
      "step": 35
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001289105481987947,
      "loss": 2.8361,
      "step": 40
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001267660660330529,
      "loss": 2.7943,
      "step": 45
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00012439743895254382,
      "loss": 2.64,
      "step": 50
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001218137945423232,
      "loss": 2.5387,
      "step": 55
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00011902508896404359,
      "loss": 2.693,
      "step": 60
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001160420685895466,
      "loss": 2.9437,
      "step": 65
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001128762285893533,
      "loss": 2.5893,
      "step": 70
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00010953976863563352,
      "loss": 2.5721,
      "step": 75
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00010604554589035391,
      "loss": 2.5719,
      "step": 80
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00010240702545976687,
      "loss": 2.757,
      "step": 85
    },
    {
      "epoch": 0.36,
      "learning_rate": 9.863822850616538e-05,
      "loss": 2.3807,
      "step": 90
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.475367821685696e-05,
      "loss": 2.2877,
      "step": 95
    },
    {
      "epoch": 0.4,
      "learning_rate": 9.076834383856722e-05,
      "loss": 2.4833,
      "step": 100
    },
    {
      "epoch": 0.42,
      "learning_rate": 8.669758299293816e-05,
      "loss": 2.5693,
      "step": 105
    },
    {
      "epoch": 0.43,
      "learning_rate": 8.255708249541069e-05,
      "loss": 2.4457,
      "step": 110
    },
    {
      "epoch": 0.45,
      "learning_rate": 7.836279790554737e-05,
      "loss": 2.5813,
      "step": 115
    },
    {
      "epoch": 0.47,
      "learning_rate": 7.413089204174082e-05,
      "loss": 2.3967,
      "step": 120
    },
    {
      "epoch": 0.49,
      "learning_rate": 6.987767269724301e-05,
      "loss": 2.4554,
      "step": 125
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.561952979752895e-05,
      "loss": 2.5141,
      "step": 130
    },
    {
      "epoch": 0.53,
      "learning_rate": 6.137287224116132e-05,
      "loss": 2.6133,
      "step": 135
    },
    {
      "epoch": 0.55,
      "learning_rate": 5.715406466754129e-05,
      "loss": 2.5404,
      "step": 140
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.297936439521389e-05,
      "loss": 2.3175,
      "step": 145
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.8864858773737654e-05,
      "loss": 2.4477,
      "step": 150
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.482640319053576e-05,
      "loss": 2.424,
      "step": 155
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.0879559971621006e-05,
      "loss": 2.6316,
      "step": 160
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.703953841164296e-05,
      "loss": 2.3138,
      "step": 165
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.33211361643535e-05,
      "loss": 2.2384,
      "step": 170
    },
    {
      "epoch": 0.69,
      "learning_rate": 2.973868221934456e-05,
      "loss": 2.1915,
      "step": 175
    },
    {
      "epoch": 0.71,
      "learning_rate": 2.6305981684799012e-05,
      "loss": 2.4846,
      "step": 180
    },
    {
      "epoch": 0.73,
      "learning_rate": 2.303626258903672e-05,
      "loss": 2.1743,
      "step": 185
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.99421249058571e-05,
      "loss": 2.2715,
      "step": 190
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.703549200011139e-05,
      "loss": 2.4405,
      "step": 195
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.4327564680610529e-05,
      "loss": 2.2981,
      "step": 200
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.1828778037427104e-05,
      "loss": 2.3665,
      "step": 205
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.548761229920845e-06,
      "loss": 2.1714,
      "step": 210
    },
    {
      "epoch": 0.85,
      "learning_rate": 7.496300380445175e-06,
      "loss": 2.5874,
      "step": 215
    },
    {
      "epoch": 0.87,
      "learning_rate": 5.679304716725937e-06,
      "loss": 2.416,
      "step": 220
    },
    {
      "epoch": 0.89,
      "learning_rate": 4.104776093383014e-06,
      "loss": 2.2762,
      "step": 225
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.778782010045488e-06,
      "loss": 2.3586,
      "step": 230
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.7064322300351674e-06,
      "loss": 2.2319,
      "step": 235
    },
    {
      "epoch": 0.95,
      "learning_rate": 8.918590897198837e-07,
      "loss": 2.479,
      "step": 240
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.382015744146126e-07,
      "loss": 2.2518,
      "step": 245
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.7593222194759165e-08,
      "loss": 2.0598,
      "step": 250
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.1660313606262207,
      "eval_runtime": 15.7786,
      "eval_samples_per_second": 20.724,
      "eval_steps_per_second": 2.598,
      "step": 253
    },
    {
      "epoch": 1.01,
      "learning_rate": 2.1153902234600496e-08,
      "loss": 2.2301,
      "step": 255
    },
    {
      "epoch": 1.03,
      "learning_rate": 2.589854993532899e-07,
      "loss": 2.306,
      "step": 260
    },
    {
      "epoch": 1.05,
      "learning_rate": 7.601715213982628e-07,
      "loss": 2.2196,
      "step": 265
    },
    {
      "epoch": 1.07,
      "learning_rate": 1.522780630978913e-06,
      "loss": 2.4015,
      "step": 270
    },
    {
      "epoch": 1.09,
      "learning_rate": 2.5438740879409643e-06,
      "loss": 2.1129,
      "step": 275
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.819517073901653e-06,
      "loss": 2.0746,
      "step": 280
    },
    {
      "epoch": 1.13,
      "learning_rate": 5.344793855206241e-06,
      "loss": 2.1512,
      "step": 285
    },
    {
      "epoch": 1.15,
      "learning_rate": 7.1138267258749755e-06,
      "loss": 2.1905,
      "step": 290
    },
    {
      "epoch": 1.17,
      "learning_rate": 9.119798657543025e-06,
      "loss": 2.3491,
      "step": 295
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.1354979569111356e-05,
      "loss": 2.17,
      "step": 300
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.3810756114877161e-05,
      "loss": 2.0833,
      "step": 305
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.647766487635476e-05,
      "loss": 2.1924,
      "step": 310
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.9345428829880624e-05,
      "loss": 2.1825,
      "step": 315
    },
    {
      "epoch": 1.26,
      "learning_rate": 2.240299694947395e-05,
      "loss": 2.1866,
      "step": 320
    },
    {
      "epoch": 1.28,
      "learning_rate": 2.5638586792341077e-05,
      "loss": 2.2806,
      "step": 325
    },
    {
      "epoch": 1.3,
      "learning_rate": 2.9039729902920112e-05,
      "loss": 2.0872,
      "step": 330
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.2593319860498214e-05,
      "loss": 2.2949,
      "step": 335
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.628566278525032e-05,
      "loss": 2.1108,
      "step": 340
    },
    {
      "epoch": 1.36,
      "learning_rate": 4.0102530108070535e-05,
      "loss": 2.2652,
      "step": 345
    },
    {
      "epoch": 1.38,
      "learning_rate": 4.4029213400848006e-05,
      "loss": 2.1869,
      "step": 350
    },
    {
      "epoch": 1.4,
      "learning_rate": 4.8050581055894354e-05,
      "loss": 1.8115,
      "step": 355
    },
    {
      "epoch": 1.42,
      "learning_rate": 5.215113659610952e-05,
      "loss": 2.0593,
      "step": 360
    },
    {
      "epoch": 1.44,
      "learning_rate": 5.631507839118308e-05,
      "loss": 2.315,
      "step": 365
    },
    {
      "epoch": 1.46,
      "learning_rate": 6.052636054971471e-05,
      "loss": 2.1821,
      "step": 370
    },
    {
      "epoch": 1.48,
      "learning_rate": 6.476875475260261e-05,
      "loss": 2.1544,
      "step": 375
    },
    {
      "epoch": 1.5,
      "learning_rate": 6.902591278942309e-05,
      "loss": 2.0091,
      "step": 380
    },
    {
      "epoch": 1.52,
      "learning_rate": 7.328142955681643e-05,
      "loss": 2.2613,
      "step": 385
    },
    {
      "epoch": 1.54,
      "learning_rate": 7.75189062761101e-05,
      "loss": 2.0543,
      "step": 390
    },
    {
      "epoch": 1.56,
      "learning_rate": 8.172201368657108e-05,
      "loss": 2.0958,
      "step": 395
    },
    {
      "epoch": 1.58,
      "learning_rate": 8.587455497076717e-05,
      "loss": 1.9326,
      "step": 400
    },
    {
      "epoch": 1.6,
      "learning_rate": 8.996052816955487e-05,
      "loss": 2.2574,
      "step": 405
    },
    {
      "epoch": 1.62,
      "learning_rate": 9.396418784617256e-05,
      "loss": 2.2874,
      "step": 410
    },
    {
      "epoch": 1.64,
      "learning_rate": 9.787010576181766e-05,
      "loss": 2.1276,
      "step": 415
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.0001016632303288892,
      "loss": 2.0318,
      "step": 420
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00010532894461279352,
      "loss": 1.9958,
      "step": 425
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00010885312265879837,
      "loss": 2.1824,
      "step": 430
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00011222218392688076,
      "loss": 2.0295,
      "step": 435
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00011542314562479964,
      "loss": 1.9594,
      "step": 440
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00011844367273772802,
      "loss": 2.1575,
      "step": 445
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00012127212556165186,
      "loss": 2.1388,
      "step": 450
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00012389760455736598,
      "loss": 2.1449,
      "step": 455
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.000126309992352219,
      "loss": 2.253,
      "step": 460
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.0001284999927277534,
      "loss": 1.9017,
      "step": 465
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00013045916644299936,
      "loss": 1.9045,
      "step": 470
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00013217996375537733,
      "loss": 1.8158,
      "step": 475
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.0001336557535138877,
      "loss": 2.1949,
      "step": 480
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00013488084871247707,
      "loss": 2.0855,
      "step": 485
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00013585052840510938,
      "loss": 2.3929,
      "step": 490
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00013656105589809228,
      "loss": 2.0561,
      "step": 495
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.0001370096931495533,
      "loss": 1.956,
      "step": 500
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00013719471132057743,
      "loss": 1.8902,
      "step": 505
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.9499354362487793,
      "eval_runtime": 15.8099,
      "eval_samples_per_second": 20.683,
      "eval_steps_per_second": 2.593,
      "step": 506
    }
  ],
  "max_steps": 506,
  "num_train_epochs": 2,
  "total_flos": 527287320576000.0,
  "trial_name": null,
  "trial_params": null
}