| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 5.888538380651945, |
  "eval_steps": 100,
| "global_step": 700, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.08, |
| "learning_rate": 5e-06, |
| "loss": 4.1311, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.17, |
| "learning_rate": 1e-05, |
| "loss": 4.0983, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.25, |
| "learning_rate": 1.5e-05, |
| "loss": 3.9944, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.34, |
| "learning_rate": 2e-05, |
| "loss": 3.8495, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.42, |
| "learning_rate": 2.5e-05, |
| "loss": 3.5944, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.5, |
| "learning_rate": 3e-05, |
| "loss": 3.1649, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.59, |
| "learning_rate": 3.5e-05, |
| "loss": 3.0072, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.67, |
| "learning_rate": 4e-05, |
| "loss": 2.9697, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.76, |
| "learning_rate": 4.5e-05, |
| "loss": 2.8497, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.84, |
| "learning_rate": 5e-05, |
| "loss": 2.8376, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.93, |
| "learning_rate": 4.998942375205502e-05, |
| "loss": 2.8255, |
| "step": 110 |
| }, |
| { |
| "epoch": 1.01, |
| "learning_rate": 4.995770395678171e-05, |
| "loss": 2.7066, |
| "step": 120 |
| }, |
| { |
| "epoch": 1.09, |
| "learning_rate": 4.990486745229364e-05, |
| "loss": 2.6717, |
| "step": 130 |
| }, |
| { |
| "epoch": 1.18, |
| "learning_rate": 4.983095894354858e-05, |
| "loss": 2.6093, |
| "step": 140 |
| }, |
| { |
| "epoch": 1.26, |
| "learning_rate": 4.973604096452361e-05, |
| "loss": 2.588, |
| "step": 150 |
| }, |
| { |
| "epoch": 1.35, |
| "learning_rate": 4.962019382530521e-05, |
| "loss": 2.5881, |
| "step": 160 |
| }, |
| { |
| "epoch": 1.43, |
| "learning_rate": 4.948351554413879e-05, |
| "loss": 2.4645, |
| "step": 170 |
| }, |
| { |
| "epoch": 1.51, |
| "learning_rate": 4.9326121764495596e-05, |
| "loss": 2.4118, |
| "step": 180 |
| }, |
| { |
| "epoch": 1.6, |
| "learning_rate": 4.914814565722671e-05, |
| "loss": 2.5078, |
| "step": 190 |
| }, |
| { |
| "epoch": 1.68, |
| "learning_rate": 4.894973780788722e-05, |
| "loss": 2.3461, |
| "step": 200 |
| }, |
| { |
| "epoch": 1.77, |
| "learning_rate": 4.873106608932585e-05, |
| "loss": 2.3559, |
| "step": 210 |
| }, |
| { |
| "epoch": 1.85, |
| "learning_rate": 4.849231551964771e-05, |
| "loss": 2.4097, |
| "step": 220 |
| }, |
| { |
| "epoch": 1.93, |
| "learning_rate": 4.823368810567056e-05, |
| "loss": 2.2607, |
| "step": 230 |
| }, |
| { |
| "epoch": 2.02, |
| "learning_rate": 4.7955402672006854e-05, |
| "loss": 2.1208, |
| "step": 240 |
| }, |
| { |
| "epoch": 2.1, |
| "learning_rate": 4.765769467591625e-05, |
| "loss": 2.112, |
| "step": 250 |
| }, |
| { |
| "epoch": 2.19, |
| "learning_rate": 4.734081600808531e-05, |
| "loss": 2.1259, |
| "step": 260 |
| }, |
| { |
| "epoch": 2.27, |
| "learning_rate": 4.700503477950278e-05, |
| "loss": 2.1712, |
| "step": 270 |
| }, |
| { |
| "epoch": 2.36, |
| "learning_rate": 4.665063509461097e-05, |
| "loss": 1.9872, |
| "step": 280 |
| }, |
| { |
| "epoch": 2.44, |
| "learning_rate": 4.627791681092499e-05, |
| "loss": 1.9918, |
| "step": 290 |
| }, |
| { |
| "epoch": 2.52, |
| "learning_rate": 4.588719528532342e-05, |
| "loss": 2.0882, |
| "step": 300 |
| }, |
| { |
| "epoch": 2.61, |
| "learning_rate": 4.54788011072248e-05, |
| "loss": 1.9361, |
| "step": 310 |
| }, |
| { |
| "epoch": 2.69, |
| "learning_rate": 4.50530798188761e-05, |
| "loss": 1.9715, |
| "step": 320 |
| }, |
| { |
| "epoch": 2.78, |
| "learning_rate": 4.4610391622989396e-05, |
| "loss": 1.8659, |
| "step": 330 |
| }, |
| { |
| "epoch": 2.86, |
| "learning_rate": 4.415111107797445e-05, |
| "loss": 1.8453, |
| "step": 340 |
| }, |
| { |
| "epoch": 2.94, |
| "learning_rate": 4.36756267810249e-05, |
| "loss": 1.9347, |
| "step": 350 |
| }, |
| { |
| "epoch": 3.03, |
| "learning_rate": 4.318434103932622e-05, |
| "loss": 1.8471, |
| "step": 360 |
| }, |
| { |
| "epoch": 3.11, |
| "learning_rate": 4.267766952966369e-05, |
| "loss": 1.7312, |
| "step": 370 |
| }, |
| { |
| "epoch": 3.2, |
| "learning_rate": 4.215604094671835e-05, |
| "loss": 1.5693, |
| "step": 380 |
| }, |
| { |
| "epoch": 3.28, |
| "learning_rate": 4.1619896640348445e-05, |
| "loss": 1.7114, |
| "step": 390 |
| }, |
| { |
| "epoch": 3.36, |
| "learning_rate": 4.1069690242163484e-05, |
| "loss": 1.5693, |
| "step": 400 |
| }, |
| { |
| "epoch": 3.45, |
| "learning_rate": 4.05058872817065e-05, |
| "loss": 1.588, |
| "step": 410 |
| }, |
| { |
| "epoch": 3.53, |
| "learning_rate": 3.9928964792569655e-05, |
| "loss": 1.6776, |
| "step": 420 |
| }, |
| { |
| "epoch": 3.62, |
| "learning_rate": 3.933941090877615e-05, |
| "loss": 1.633, |
| "step": 430 |
| }, |
| { |
| "epoch": 3.7, |
| "learning_rate": 3.873772445177015e-05, |
| "loss": 1.5435, |
| "step": 440 |
| }, |
| { |
| "epoch": 3.79, |
| "learning_rate": 3.8124414508364e-05, |
| "loss": 1.522, |
| "step": 450 |
| }, |
| { |
| "epoch": 3.87, |
| "learning_rate": 3.7500000000000003e-05, |
| "loss": 1.4307, |
| "step": 460 |
| }, |
| { |
| "epoch": 3.95, |
| "learning_rate": 3.686500924369101e-05, |
| "loss": 1.6171, |
| "step": 470 |
| }, |
| { |
| "epoch": 4.04, |
| "learning_rate": 3.621997950501156e-05, |
| "loss": 1.482, |
| "step": 480 |
| }, |
| { |
| "epoch": 4.12, |
| "learning_rate": 3.556545654351749e-05, |
| "loss": 1.4552, |
| "step": 490 |
| }, |
| { |
| "epoch": 4.21, |
| "learning_rate": 3.490199415097892e-05, |
| "loss": 1.2726, |
| "step": 500 |
| }, |
| { |
| "epoch": 4.29, |
| "learning_rate": 3.423015368281711e-05, |
| "loss": 1.312, |
| "step": 510 |
| }, |
| { |
| "epoch": 4.37, |
| "learning_rate": 3.355050358314172e-05, |
| "loss": 1.3236, |
| "step": 520 |
| }, |
| { |
| "epoch": 4.46, |
| "learning_rate": 3.2863618903790346e-05, |
| "loss": 1.2786, |
| "step": 530 |
| }, |
| { |
| "epoch": 4.54, |
| "learning_rate": 3.217008081777726e-05, |
| "loss": 1.2587, |
| "step": 540 |
| }, |
| { |
| "epoch": 4.63, |
| "learning_rate": 3.147047612756302e-05, |
| "loss": 1.3509, |
| "step": 550 |
| }, |
| { |
| "epoch": 4.71, |
| "learning_rate": 3.076539676856101e-05, |
| "loss": 1.2632, |
| "step": 560 |
| }, |
| { |
| "epoch": 4.79, |
| "learning_rate": 3.0055439308300952e-05, |
| "loss": 1.2215, |
| "step": 570 |
| }, |
| { |
| "epoch": 4.88, |
| "learning_rate": 2.9341204441673266e-05, |
| "loss": 1.2773, |
| "step": 580 |
| }, |
| { |
| "epoch": 4.96, |
| "learning_rate": 2.8623296482681166e-05, |
| "loss": 1.3122, |
| "step": 590 |
| }, |
| { |
| "epoch": 5.05, |
| "learning_rate": 2.7902322853130757e-05, |
| "loss": 1.2999, |
| "step": 600 |
| }, |
| { |
| "epoch": 5.13, |
| "learning_rate": 2.717889356869146e-05, |
| "loss": 1.13, |
| "step": 610 |
| }, |
| { |
| "epoch": 5.22, |
| "learning_rate": 2.6453620722761896e-05, |
| "loss": 1.1283, |
| "step": 620 |
| }, |
| { |
| "epoch": 5.3, |
| "learning_rate": 2.5727117968577784e-05, |
| "loss": 1.0922, |
| "step": 630 |
| }, |
| { |
| "epoch": 5.38, |
| "learning_rate": 2.5e-05, |
| "loss": 1.0549, |
| "step": 640 |
| }, |
| { |
| "epoch": 5.47, |
| "learning_rate": 2.4272882031422215e-05, |
| "loss": 1.0966, |
| "step": 650 |
| }, |
| { |
| "epoch": 5.55, |
| "learning_rate": 2.3546379277238107e-05, |
| "loss": 1.2114, |
| "step": 660 |
| }, |
| { |
| "epoch": 5.64, |
| "learning_rate": 2.2821106431308544e-05, |
| "loss": 0.9685, |
| "step": 670 |
| }, |
| { |
| "epoch": 5.72, |
| "learning_rate": 2.2097677146869242e-05, |
| "loss": 0.9688, |
| "step": 680 |
| }, |
| { |
| "epoch": 5.8, |
| "learning_rate": 2.1376703517318837e-05, |
| "loss": 1.0943, |
| "step": 690 |
| }, |
| { |
| "epoch": 5.89, |
| "learning_rate": 2.0658795558326743e-05, |
| "loss": 0.9363, |
| "step": 700 |
| } |
| ], |
| "logging_steps": 10, |
| "max_steps": 1180, |
| "num_train_epochs": 10, |
| "save_steps": 100, |
| "total_flos": 6.058731534974976e+17, |
| "trial_name": null, |
| "trial_params": null |
| } |
|
|