{
  "best_metric": 1.8454276323318481,
  "best_model_checkpoint": "/home/llmadmin/lawrence/autoWSB/data/tunned_v4/checkpoint-1000",
  "epoch": 2.9994666666666667,
  "global_step": 4218,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 8.333333333333333e-05,
      "loss": 1.9887,
      "step": 100
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00016666666666666666,
      "loss": 1.7819,
      "step": 200
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00025,
      "loss": 1.7572,
      "step": 300
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00024400444104134763,
      "loss": 3.1654,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00023762618683001533,
      "loss": 2.402,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.0042166709899902,
      "eval_runtime": 1078.6167,
      "eval_samples_per_second": 23.178,
      "eval_steps_per_second": 0.579,
      "step": 500
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.000231247932618683,
      "loss": 1.9543,
      "step": 600
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0002248696784073507,
      "loss": 1.905,
      "step": 700
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00021849142419601836,
      "loss": 1.8835,
      "step": 800
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00021211316998468607,
      "loss": 1.8628,
      "step": 900
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00020573491577335375,
      "loss": 1.8463,
      "step": 1000
    },
    {
      "epoch": 0.71,
      "eval_loss": 1.8454276323318481,
      "eval_runtime": 1080.8608,
      "eval_samples_per_second": 23.13,
      "eval_steps_per_second": 0.578,
      "step": 1000
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00019935666156202145,
      "loss": 1.8654,
      "step": 1100
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00019297840735068913,
      "loss": 1.8511,
      "step": 1200
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00018660015313935684,
      "loss": 1.8424,
      "step": 1300
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0001802218989280245,
      "loss": 1.8579,
      "step": 1400
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00017843598774885147,
      "loss": 23.4765,
      "step": 1500
    },
    {
      "epoch": 1.07,
      "eval_loss": 10.320993423461914,
      "eval_runtime": 1165.3618,
      "eval_samples_per_second": 21.453,
      "eval_steps_per_second": 0.536,
      "step": 1500
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00017767059724349159,
      "loss": 26.8324,
      "step": 1600
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00017129234303215926,
      "loss": 33.5803,
      "step": 1700
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00016491408882082697,
      "loss": 75831.04,
      "step": 1800
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00015853583460949465,
      "loss": 112434.02,
      "step": 1900
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00015215758039816232,
      "loss": 9892.82,
      "step": 2000
    },
    {
      "epoch": 1.42,
      "eval_loss": 10.320993423461914,
      "eval_runtime": 1166.8445,
      "eval_samples_per_second": 21.425,
      "eval_steps_per_second": 0.536,
      "step": 2000
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00014577932618683,
      "loss": 9040.6219,
      "step": 2100
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0001394010719754977,
      "loss": 2015.4547,
      "step": 2200
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.0001330228177641654,
      "loss": 16854.6437,
      "step": 2300
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.0001266445635528331,
      "loss": 795.047,
      "step": 2400
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00012026630934150074,
      "loss": 38857.9575,
      "step": 2500
    },
    {
      "epoch": 1.78,
      "eval_loss": 10.320993423461914,
      "eval_runtime": 1167.9826,
      "eval_samples_per_second": 21.404,
      "eval_steps_per_second": 0.535,
      "step": 2500
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00011388805513016846,
      "loss": 185.0979,
      "step": 2600
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00010750980091883614,
      "loss": 12796.2138,
      "step": 2700
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.00010113154670750384,
      "loss": 14625.28,
      "step": 2800
    },
    {
      "epoch": 2.06,
      "learning_rate": 9.475329249617151e-05,
      "loss": 271.5516,
      "step": 2900
    },
    {
      "epoch": 2.13,
      "learning_rate": 8.837503828483921e-05,
      "loss": 5358.3219,
      "step": 3000
    },
    {
      "epoch": 2.13,
      "eval_loss": 10.320993423461914,
      "eval_runtime": 1167.2296,
      "eval_samples_per_second": 21.418,
      "eval_steps_per_second": 0.535,
      "step": 3000
    },
    {
      "epoch": 2.2,
      "learning_rate": 8.199678407350689e-05,
      "loss": 106711.13,
      "step": 3100
    },
    {
      "epoch": 2.28,
      "learning_rate": 7.561852986217458e-05,
      "loss": 141536.53,
      "step": 3200
    },
    {
      "epoch": 2.35,
      "learning_rate": 6.924027565084226e-05,
      "loss": 784.2875,
      "step": 3300
    },
    {
      "epoch": 2.42,
      "learning_rate": 6.286202143950997e-05,
      "loss": 95102.5,
      "step": 3400
    },
    {
      "epoch": 2.49,
      "learning_rate": 5.648376722817764e-05,
      "loss": 2327.5258,
      "step": 3500
    },
    {
      "epoch": 2.49,
      "eval_loss": 10.320993423461914,
      "eval_runtime": 1166.8332,
      "eval_samples_per_second": 21.426,
      "eval_steps_per_second": 0.536,
      "step": 3500
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.010551301684534e-05,
      "loss": 59017.795,
      "step": 3600
    },
    {
      "epoch": 2.63,
      "learning_rate": 4.3727258805513014e-05,
      "loss": 680382.64,
      "step": 3700
    },
    {
      "epoch": 2.7,
      "learning_rate": 3.734900459418072e-05,
      "loss": 1483.1705,
      "step": 3800
    },
    {
      "epoch": 2.77,
      "learning_rate": 3.097075038284839e-05,
      "loss": 30906.0775,
      "step": 3900
    },
    {
      "epoch": 2.84,
      "learning_rate": 2.459249617151609e-05,
      "loss": 46132.635,
      "step": 4000
    },
    {
      "epoch": 2.84,
      "eval_loss": 10.320993423461914,
      "eval_runtime": 1166.5367,
      "eval_samples_per_second": 21.431,
      "eval_steps_per_second": 0.536,
      "step": 4000
    },
    {
      "epoch": 2.92,
      "learning_rate": 1.8214241960183767e-05,
      "loss": 31202.95,
      "step": 4100
    },
    {
      "epoch": 2.99,
      "learning_rate": 1.1835987748851469e-05,
      "loss": 2477.5823,
      "step": 4200
    },
    {
      "epoch": 3.0,
      "step": 4218,
      "total_flos": 2.7466670484779696e+19,
      "train_loss": 35493.948473290504,
      "train_runtime": 121039.6951,
      "train_samples_per_second": 5.577,
      "train_steps_per_second": 0.035
    }
  ],
  "max_steps": 4218,
  "num_train_epochs": 3,
  "total_flos": 2.7466670484779696e+19,
  "trial_name": null,
  "trial_params": null
}