{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 51890,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 4.95985096678872e-05,
      "loss": 8.2502,
      "step": 500
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.91970193357744e-05,
      "loss": 0.4362,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.879552900366159e-05,
      "loss": 0.2711,
      "step": 1500
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.839403867154879e-05,
      "loss": 0.2293,
      "step": 2000
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.799254833943599e-05,
      "loss": 0.2035,
      "step": 2500
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.759105800732319e-05,
      "loss": 0.1796,
      "step": 3000
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.7189567675210385e-05,
      "loss": 0.1772,
      "step": 3500
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.678807734309758e-05,
      "loss": 0.1613,
      "step": 4000
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.6386587010984776e-05,
      "loss": 0.1587,
      "step": 4500
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.5985096678871974e-05,
      "loss": 0.1529,
      "step": 5000
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.558360634675917e-05,
      "loss": 0.1462,
      "step": 5500
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.518211601464637e-05,
      "loss": 0.1477,
      "step": 6000
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.478062568253357e-05,
      "loss": 0.1385,
      "step": 6500
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.437913535042076e-05,
      "loss": 0.137,
      "step": 7000
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.397764501830796e-05,
      "loss": 0.1299,
      "step": 7500
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.357615468619516e-05,
      "loss": 0.1336,
      "step": 8000
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.317466435408236e-05,
      "loss": 0.1288,
      "step": 8500
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.2773174021969556e-05,
      "loss": 0.1322,
      "step": 9000
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.237168368985675e-05,
      "loss": 0.1213,
      "step": 9500
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.1970193357743946e-05,
      "loss": 0.1286,
      "step": 10000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.14025843143463135,
      "eval_runtime": 124.3791,
      "eval_samples_per_second": 89.798,
      "eval_steps_per_second": 11.232,
      "step": 10378
    },
    {
      "epoch": 1.01,
      "learning_rate": 4.1568703025631145e-05,
      "loss": 0.1178,
      "step": 10500
    },
    {
      "epoch": 1.06,
      "learning_rate": 4.116721269351834e-05,
      "loss": 0.1113,
      "step": 11000
    },
    {
      "epoch": 1.11,
      "learning_rate": 4.076572236140554e-05,
      "loss": 0.1137,
      "step": 11500
    },
    {
      "epoch": 1.16,
      "learning_rate": 4.0364232029292733e-05,
      "loss": 0.1077,
      "step": 12000
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.996274169717993e-05,
      "loss": 0.1075,
      "step": 12500
    },
    {
      "epoch": 1.25,
      "learning_rate": 3.956125136506713e-05,
      "loss": 0.106,
      "step": 13000
    },
    {
      "epoch": 1.3,
      "learning_rate": 3.915976103295433e-05,
      "loss": 0.107,
      "step": 13500
    },
    {
      "epoch": 1.35,
      "learning_rate": 3.875827070084153e-05,
      "loss": 0.1057,
      "step": 14000
    },
    {
      "epoch": 1.4,
      "learning_rate": 3.8356780368728726e-05,
      "loss": 0.0984,
      "step": 14500
    },
    {
      "epoch": 1.45,
      "learning_rate": 3.795529003661592e-05,
      "loss": 0.1016,
      "step": 15000
    },
    {
      "epoch": 1.49,
      "learning_rate": 3.7553799704503117e-05,
      "loss": 0.1019,
      "step": 15500
    },
    {
      "epoch": 1.54,
      "learning_rate": 3.7152309372390315e-05,
      "loss": 0.1023,
      "step": 16000
    },
    {
      "epoch": 1.59,
      "learning_rate": 3.6750819040277514e-05,
      "loss": 0.1004,
      "step": 16500
    },
    {
      "epoch": 1.64,
      "learning_rate": 3.634932870816471e-05,
      "loss": 0.1008,
      "step": 17000
    },
    {
      "epoch": 1.69,
      "learning_rate": 3.5947838376051904e-05,
      "loss": 0.0962,
      "step": 17500
    },
    {
      "epoch": 1.73,
      "learning_rate": 3.55463480439391e-05,
      "loss": 0.1001,
      "step": 18000
    },
    {
      "epoch": 1.78,
      "learning_rate": 3.51448577118263e-05,
      "loss": 0.0942,
      "step": 18500
    },
    {
      "epoch": 1.83,
      "learning_rate": 3.47433673797135e-05,
      "loss": 0.0975,
      "step": 19000
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.43418770476007e-05,
      "loss": 0.1006,
      "step": 19500
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.394038671548789e-05,
      "loss": 0.0932,
      "step": 20000
    },
    {
      "epoch": 1.98,
      "learning_rate": 3.353889638337509e-05,
      "loss": 0.0937,
      "step": 20500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.12487868219614029,
      "eval_runtime": 124.206,
      "eval_samples_per_second": 89.923,
      "eval_steps_per_second": 11.247,
      "step": 20756
    },
    {
      "epoch": 2.02,
      "learning_rate": 3.313740605126229e-05,
      "loss": 0.0927,
      "step": 21000
    },
    {
      "epoch": 2.07,
      "learning_rate": 3.2735915719149485e-05,
      "loss": 0.0833,
      "step": 21500
    },
    {
      "epoch": 2.12,
      "learning_rate": 3.2334425387036684e-05,
      "loss": 0.0838,
      "step": 22000
    },
    {
      "epoch": 2.17,
      "learning_rate": 3.193293505492388e-05,
      "loss": 0.0833,
      "step": 22500
    },
    {
      "epoch": 2.22,
      "learning_rate": 3.1531444722811074e-05,
      "loss": 0.0807,
      "step": 23000
    },
    {
      "epoch": 2.26,
      "learning_rate": 3.112995439069827e-05,
      "loss": 0.0802,
      "step": 23500
    },
    {
      "epoch": 2.31,
      "learning_rate": 3.072846405858547e-05,
      "loss": 0.0818,
      "step": 24000
    },
    {
      "epoch": 2.36,
      "learning_rate": 3.032697372647267e-05,
      "loss": 0.0834,
      "step": 24500
    },
    {
      "epoch": 2.41,
      "learning_rate": 2.9925483394359865e-05,
      "loss": 0.0815,
      "step": 25000
    },
    {
      "epoch": 2.46,
      "learning_rate": 2.9523993062247064e-05,
      "loss": 0.0831,
      "step": 25500
    },
    {
      "epoch": 2.51,
      "learning_rate": 2.912250273013426e-05,
      "loss": 0.0835,
      "step": 26000
    },
    {
      "epoch": 2.55,
      "learning_rate": 2.8721012398021457e-05,
      "loss": 0.0812,
      "step": 26500
    },
    {
      "epoch": 2.6,
      "learning_rate": 2.8319522065908656e-05,
      "loss": 0.0782,
      "step": 27000
    },
    {
      "epoch": 2.65,
      "learning_rate": 2.791803173379585e-05,
      "loss": 0.084,
      "step": 27500
    },
    {
      "epoch": 2.7,
      "learning_rate": 2.751654140168305e-05,
      "loss": 0.0821,
      "step": 28000
    },
    {
      "epoch": 2.75,
      "learning_rate": 2.7115051069570248e-05,
      "loss": 0.0788,
      "step": 28500
    },
    {
      "epoch": 2.79,
      "learning_rate": 2.6713560737457443e-05,
      "loss": 0.0838,
      "step": 29000
    },
    {
      "epoch": 2.84,
      "learning_rate": 2.6312070405344642e-05,
      "loss": 0.0822,
      "step": 29500
    },
    {
      "epoch": 2.89,
      "learning_rate": 2.5910580073231837e-05,
      "loss": 0.0801,
      "step": 30000
    },
    {
      "epoch": 2.94,
      "learning_rate": 2.5509089741119035e-05,
      "loss": 0.0799,
      "step": 30500
    },
    {
      "epoch": 2.99,
      "learning_rate": 2.5107599409006234e-05,
      "loss": 0.0809,
      "step": 31000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.11599379032850266,
      "eval_runtime": 124.1737,
      "eval_samples_per_second": 89.947,
      "eval_steps_per_second": 11.25,
      "step": 31134
    },
    {
      "epoch": 3.04,
      "learning_rate": 2.470610907689343e-05,
      "loss": 0.0754,
      "step": 31500
    },
    {
      "epoch": 3.08,
      "learning_rate": 2.4304618744780628e-05,
      "loss": 0.0693,
      "step": 32000
    },
    {
      "epoch": 3.13,
      "learning_rate": 2.3903128412667826e-05,
      "loss": 0.0726,
      "step": 32500
    },
    {
      "epoch": 3.18,
      "learning_rate": 2.350163808055502e-05,
      "loss": 0.0689,
      "step": 33000
    },
    {
      "epoch": 3.23,
      "learning_rate": 2.310014774844222e-05,
      "loss": 0.0718,
      "step": 33500
    },
    {
      "epoch": 3.28,
      "learning_rate": 2.2698657416329415e-05,
      "loss": 0.07,
      "step": 34000
    },
    {
      "epoch": 3.32,
      "learning_rate": 2.2297167084216614e-05,
      "loss": 0.0753,
      "step": 34500
    },
    {
      "epoch": 3.37,
      "learning_rate": 2.1895676752103812e-05,
      "loss": 0.071,
      "step": 35000
    },
    {
      "epoch": 3.42,
      "learning_rate": 2.1494186419991007e-05,
      "loss": 0.0688,
      "step": 35500
    },
    {
      "epoch": 3.47,
      "learning_rate": 2.1092696087878206e-05,
      "loss": 0.0692,
      "step": 36000
    },
    {
      "epoch": 3.52,
      "learning_rate": 2.06912057557654e-05,
      "loss": 0.0696,
      "step": 36500
    },
    {
      "epoch": 3.57,
      "learning_rate": 2.02897154236526e-05,
      "loss": 0.0714,
      "step": 37000
    },
    {
      "epoch": 3.61,
      "learning_rate": 1.9888225091539798e-05,
      "loss": 0.0682,
      "step": 37500
    },
    {
      "epoch": 3.66,
      "learning_rate": 1.9486734759426993e-05,
      "loss": 0.0675,
      "step": 38000
    },
    {
      "epoch": 3.71,
      "learning_rate": 1.9085244427314192e-05,
      "loss": 0.0689,
      "step": 38500
    },
    {
      "epoch": 3.76,
      "learning_rate": 1.868375409520139e-05,
      "loss": 0.0727,
      "step": 39000
    },
    {
      "epoch": 3.81,
      "learning_rate": 1.8282263763088585e-05,
      "loss": 0.0688,
      "step": 39500
    },
    {
      "epoch": 3.85,
      "learning_rate": 1.7880773430975784e-05,
      "loss": 0.0677,
      "step": 40000
    },
    {
      "epoch": 3.9,
      "learning_rate": 1.747928309886298e-05,
      "loss": 0.0664,
      "step": 40500
    },
    {
      "epoch": 3.95,
      "learning_rate": 1.7077792766750178e-05,
      "loss": 0.0677,
      "step": 41000
    },
    {
      "epoch": 4.0,
      "learning_rate": 1.6676302434637376e-05,
      "loss": 0.0712,
      "step": 41500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.11910858005285263,
      "eval_runtime": 124.1925,
      "eval_samples_per_second": 89.933,
      "eval_steps_per_second": 11.249,
      "step": 41512
    },
    {
      "epoch": 4.05,
      "learning_rate": 1.627481210252457e-05,
      "loss": 0.064,
      "step": 42000
    },
    {
      "epoch": 4.1,
      "learning_rate": 1.587332177041177e-05,
      "loss": 0.0642,
      "step": 42500
    },
    {
      "epoch": 4.14,
      "learning_rate": 1.547183143829897e-05,
      "loss": 0.0603,
      "step": 43000
    },
    {
      "epoch": 4.19,
      "learning_rate": 1.5070341106186164e-05,
      "loss": 0.059,
      "step": 43500
    },
    {
      "epoch": 4.24,
      "learning_rate": 1.4668850774073362e-05,
      "loss": 0.0636,
      "step": 44000
    },
    {
      "epoch": 4.29,
      "learning_rate": 1.4267360441960559e-05,
      "loss": 0.0582,
      "step": 44500
    },
    {
      "epoch": 4.34,
      "learning_rate": 1.3865870109847756e-05,
      "loss": 0.0648,
      "step": 45000
    },
    {
      "epoch": 4.38,
      "learning_rate": 1.3464379777734953e-05,
      "loss": 0.0586,
      "step": 45500
    },
    {
      "epoch": 4.43,
      "learning_rate": 1.3062889445622151e-05,
      "loss": 0.0612,
      "step": 46000
    },
    {
      "epoch": 4.48,
      "learning_rate": 1.2661399113509348e-05,
      "loss": 0.0601,
      "step": 46500
    },
    {
      "epoch": 4.53,
      "learning_rate": 1.2259908781396545e-05,
      "loss": 0.0618,
      "step": 47000
    },
    {
      "epoch": 4.58,
      "learning_rate": 1.1858418449283742e-05,
      "loss": 0.0612,
      "step": 47500
    },
    {
      "epoch": 4.63,
      "learning_rate": 1.145692811717094e-05,
      "loss": 0.0598,
      "step": 48000
    },
    {
      "epoch": 4.67,
      "learning_rate": 1.1055437785058137e-05,
      "loss": 0.0609,
      "step": 48500
    },
    {
      "epoch": 4.72,
      "learning_rate": 1.0653947452945334e-05,
      "loss": 0.0639,
      "step": 49000
    },
    {
      "epoch": 4.77,
      "learning_rate": 1.0252457120832531e-05,
      "loss": 0.0639,
      "step": 49500
    },
    {
      "epoch": 4.82,
      "learning_rate": 9.85096678871973e-06,
      "loss": 0.064,
      "step": 50000
    },
    {
      "epoch": 4.87,
      "learning_rate": 9.449476456606926e-06,
      "loss": 0.0614,
      "step": 50500
    },
    {
      "epoch": 4.91,
      "learning_rate": 9.047986124494123e-06,
      "loss": 0.0618,
      "step": 51000
    },
    {
      "epoch": 4.96,
      "learning_rate": 8.64649579238132e-06,
      "loss": 0.0624,
      "step": 51500
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.1161259263753891,
      "eval_runtime": 124.2379,
      "eval_samples_per_second": 89.9,
      "eval_steps_per_second": 11.245,
      "step": 51890
    }
  ],
  "logging_steps": 500,
  "max_steps": 62268,
  "num_train_epochs": 6,
  "save_steps": 500,
  "total_flos": 4.977120588123341e+17,
  "trial_name": null,
  "trial_params": null
}