{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 18750,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "grad_norm": 18.305925369262695,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.635,
      "step": 500
    },
    {
      "epoch": 0.16,
      "grad_norm": 15.116157531738281,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.5409,
      "step": 1000
    },
    {
      "epoch": 0.24,
      "grad_norm": 13.341463088989258,
      "learning_rate": 4e-05,
      "loss": 0.4258,
      "step": 1500
    },
    {
      "epoch": 0.32,
      "grad_norm": 11.549241065979004,
      "learning_rate": 4.9993231029486544e-05,
      "loss": 0.3779,
      "step": 2000
    },
    {
      "epoch": 0.4,
      "grad_norm": 13.320897102355957,
      "learning_rate": 4.983095894354858e-05,
      "loss": 0.3566,
      "step": 2500
    },
    {
      "epoch": 0.48,
      "grad_norm": 15.671639442443848,
      "learning_rate": 4.9453690018345144e-05,
      "loss": 0.3249,
      "step": 3000
    },
    {
      "epoch": 0.56,
      "grad_norm": 10.806138038635254,
      "learning_rate": 4.88646908061933e-05,
      "loss": 0.2979,
      "step": 3500
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.03432423993945122,
      "learning_rate": 4.806906110888606e-05,
      "loss": 0.2806,
      "step": 4000
    },
    {
      "epoch": 0.72,
      "grad_norm": 11.576502799987793,
      "learning_rate": 4.707368982147318e-05,
      "loss": 0.257,
      "step": 4500
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.5009346008300781,
      "learning_rate": 4.588719528532342e-05,
      "loss": 0.2509,
      "step": 5000
    },
    {
      "epoch": 0.88,
      "grad_norm": 10.398884773254395,
      "learning_rate": 4.4519850666916484e-05,
      "loss": 0.2642,
      "step": 5500
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.5586361885070801,
      "learning_rate": 4.2983495008466276e-05,
      "loss": 0.2452,
      "step": 6000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9526,
      "eval_loss": 0.2175797075033188,
      "eval_runtime": 288.1127,
      "eval_samples_per_second": 17.354,
      "eval_steps_per_second": 2.169,
      "step": 6250
    },
    {
      "epoch": 1.04,
      "grad_norm": 15.244111061096191,
      "learning_rate": 4.129143072053638e-05,
      "loss": 0.1726,
      "step": 6500
    },
    {
      "epoch": 1.12,
      "grad_norm": 23.2961368560791,
      "learning_rate": 3.945830840419966e-05,
      "loss": 0.1662,
      "step": 7000
    },
    {
      "epoch": 1.2,
      "grad_norm": 48.89887619018555,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.1588,
      "step": 7500
    },
    {
      "epoch": 1.28,
      "grad_norm": 1.5729694496258162e-05,
      "learning_rate": 3.543346136204545e-05,
      "loss": 0.1415,
      "step": 8000
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 0.0002951535861939192,
      "learning_rate": 3.327658544712395e-05,
      "loss": 0.1514,
      "step": 8500
    },
    {
      "epoch": 1.44,
      "grad_norm": 15.348397254943848,
      "learning_rate": 3.104804738999169e-05,
      "loss": 0.1365,
      "step": 9000
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.00016808633517939597,
      "learning_rate": 2.876714280623708e-05,
      "loss": 0.1339,
      "step": 9500
    },
    {
      "epoch": 1.6,
      "grad_norm": 30.164745330810547,
      "learning_rate": 2.6453620722761896e-05,
      "loss": 0.138,
      "step": 10000
    },
    {
      "epoch": 1.6800000000000002,
      "grad_norm": 6.264598050620407e-05,
      "learning_rate": 2.4127512582437485e-05,
      "loss": 0.1395,
      "step": 10500
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.0032692451495677233,
      "learning_rate": 2.1808958803485136e-05,
      "loss": 0.1151,
      "step": 11000
    },
    {
      "epoch": 1.8399999999999999,
      "grad_norm": 0.0009920683223754168,
      "learning_rate": 1.9518034395302414e-05,
      "loss": 0.1159,
      "step": 11500
    },
    {
      "epoch": 1.92,
      "grad_norm": 0.0016635229112580419,
      "learning_rate": 1.7274575140626318e-05,
      "loss": 0.1204,
      "step": 12000
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.007010475266724825,
      "learning_rate": 1.509800584902108e-05,
      "loss": 0.1096,
      "step": 12500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9636,
      "eval_loss": 0.16214776039123535,
      "eval_runtime": 287.6078,
      "eval_samples_per_second": 17.385,
      "eval_steps_per_second": 2.173,
      "step": 12500
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.001354572712443769,
      "learning_rate": 1.3007172168743854e-05,
      "loss": 0.0312,
      "step": 13000
    },
    {
      "epoch": 2.16,
      "grad_norm": 8.306991730933078e-06,
      "learning_rate": 1.1020177413231334e-05,
      "loss": 0.0281,
      "step": 13500
    },
    {
      "epoch": 2.24,
      "grad_norm": 2.6390673156129196e-05,
      "learning_rate": 9.154225815032242e-06,
      "loss": 0.0361,
      "step": 14000
    },
    {
      "epoch": 2.32,
      "grad_norm": 0.00028656359063461423,
      "learning_rate": 7.4254735643584564e-06,
      "loss": 0.06,
      "step": 14500
    },
    {
      "epoch": 2.4,
      "grad_norm": 3.382603972568177e-05,
      "learning_rate": 5.848888922025553e-06,
      "loss": 0.0392,
      "step": 15000
    },
    {
      "epoch": 2.48,
      "grad_norm": 0.001894937246106565,
      "learning_rate": 4.438122617983443e-06,
      "loss": 0.0332,
      "step": 15500
    },
    {
      "epoch": 2.56,
      "grad_norm": 2.6853955205297098e-05,
      "learning_rate": 3.205389657580943e-06,
      "loss": 0.0362,
      "step": 16000
    },
    {
      "epoch": 2.64,
      "grad_norm": 0.7828443646430969,
      "learning_rate": 2.1613635589349756e-06,
      "loss": 0.0324,
      "step": 16500
    },
    {
      "epoch": 2.7199999999999998,
      "grad_norm": 0.1359904706478119,
      "learning_rate": 1.31508393714177e-06,
      "loss": 0.0474,
      "step": 17000
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.0003148973046336323,
      "learning_rate": 6.738782355044049e-07,
      "loss": 0.0387,
      "step": 17500
    },
    {
      "epoch": 2.88,
      "grad_norm": 0.00042670563561841846,
      "learning_rate": 2.4329828146074095e-07,
      "loss": 0.028,
      "step": 18000
    },
    {
      "epoch": 2.96,
      "grad_norm": 9.56340809352696e-05,
      "learning_rate": 2.7072216536885853e-08,
      "loss": 0.0349,
      "step": 18500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9716,
      "eval_loss": 0.18466220796108246,
      "eval_runtime": 286.2615,
      "eval_samples_per_second": 17.467,
      "eval_steps_per_second": 2.183,
      "step": 18750
    }
  ],
  "logging_steps": 500,
  "max_steps": 18750,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}