{
  "best_metric": 0.0028607482090592384,
  "best_model_checkpoint": "autotrain-9ikup-ih7yd/checkpoint-300",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 1.0231589078903198,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.6826,
      "step": 5
    },
    {
      "epoch": 0.1,
      "grad_norm": 1.1381429433822632,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.6653,
      "step": 10
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.9357160925865173,
      "learning_rate": 2.5e-05,
      "loss": 0.6252,
      "step": 15
    },
    {
      "epoch": 0.2,
      "grad_norm": 1.420783519744873,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.5543,
      "step": 20
    },
    {
      "epoch": 0.25,
      "grad_norm": 1.1961886882781982,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.432,
      "step": 25
    },
    {
      "epoch": 0.3,
      "grad_norm": 1.0095195770263672,
      "learning_rate": 5e-05,
      "loss": 0.3005,
      "step": 30
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.7485983967781067,
      "learning_rate": 4.9074074074074075e-05,
      "loss": 0.185,
      "step": 35
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.5755214095115662,
      "learning_rate": 4.814814814814815e-05,
      "loss": 0.1152,
      "step": 40
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.4263462424278259,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.0794,
      "step": 45
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.3358534872531891,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.0557,
      "step": 50
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.2775576114654541,
      "learning_rate": 4.5370370370370374e-05,
      "loss": 0.0434,
      "step": 55
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.23248009383678436,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.0336,
      "step": 60
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.2064099758863449,
      "learning_rate": 4.351851851851852e-05,
      "loss": 0.028,
      "step": 65
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.1811286062002182,
      "learning_rate": 4.259259259259259e-05,
      "loss": 0.0239,
      "step": 70
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.16121040284633636,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.0197,
      "step": 75
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.15021204948425293,
      "learning_rate": 4.074074074074074e-05,
      "loss": 0.0179,
      "step": 80
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.13360002636909485,
      "learning_rate": 3.981481481481482e-05,
      "loss": 0.0162,
      "step": 85
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.1236184760928154,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.0146,
      "step": 90
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.11776061356067657,
      "learning_rate": 3.7962962962962964e-05,
      "loss": 0.0129,
      "step": 95
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.11281422525644302,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.012,
      "step": 100
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 1.0,
      "eval_auc": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.009027771651744843,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 36.0656,
      "eval_samples_per_second": 5.545,
      "eval_steps_per_second": 0.36,
      "step": 100
    },
    {
      "epoch": 1.05,
      "grad_norm": 0.09917238354682922,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.0112,
      "step": 105
    },
    {
      "epoch": 1.1,
      "grad_norm": 0.1017189547419548,
      "learning_rate": 3.518518518518519e-05,
      "loss": 0.0106,
      "step": 110
    },
    {
      "epoch": 1.15,
      "grad_norm": 0.09118398278951645,
      "learning_rate": 3.425925925925926e-05,
      "loss": 0.0096,
      "step": 115
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.08792706578969955,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.0091,
      "step": 120
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.08533497899770737,
      "learning_rate": 3.240740740740741e-05,
      "loss": 0.0087,
      "step": 125
    },
    {
      "epoch": 1.3,
      "grad_norm": 0.07599101215600967,
      "learning_rate": 3.148148148148148e-05,
      "loss": 0.0083,
      "step": 130
    },
    {
      "epoch": 1.35,
      "grad_norm": 0.07442031055688858,
      "learning_rate": 3.055555555555556e-05,
      "loss": 0.0075,
      "step": 135
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.07316122204065323,
      "learning_rate": 2.962962962962963e-05,
      "loss": 0.0076,
      "step": 140
    },
    {
      "epoch": 1.45,
      "grad_norm": 0.06952174752950668,
      "learning_rate": 2.8703703703703706e-05,
      "loss": 0.0069,
      "step": 145
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.0664600282907486,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.0067,
      "step": 150
    },
    {
      "epoch": 1.55,
      "grad_norm": 0.06737347692251205,
      "learning_rate": 2.6851851851851855e-05,
      "loss": 0.0067,
      "step": 155
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.06326944380998611,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 0.0064,
      "step": 160
    },
    {
      "epoch": 1.65,
      "grad_norm": 0.06255871057510376,
      "learning_rate": 2.5e-05,
      "loss": 0.0061,
      "step": 165
    },
    {
      "epoch": 1.7,
      "grad_norm": 0.05803116410970688,
      "learning_rate": 2.4074074074074074e-05,
      "loss": 0.0057,
      "step": 170
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.0586419552564621,
      "learning_rate": 2.314814814814815e-05,
      "loss": 0.0056,
      "step": 175
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.053392261266708374,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.0054,
      "step": 180
    },
    {
      "epoch": 1.85,
      "grad_norm": 0.05675709992647171,
      "learning_rate": 2.1296296296296296e-05,
      "loss": 0.0053,
      "step": 185
    },
    {
      "epoch": 1.9,
      "grad_norm": 0.05368509143590927,
      "learning_rate": 2.037037037037037e-05,
      "loss": 0.0051,
      "step": 190
    },
    {
      "epoch": 1.95,
      "grad_norm": 0.05122752860188484,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.0051,
      "step": 195
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.05085288733243942,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.0048,
      "step": 200
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 1.0,
      "eval_auc": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.0036500704009085894,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 38.0889,
      "eval_samples_per_second": 5.251,
      "eval_steps_per_second": 0.341,
      "step": 200
    },
    {
      "epoch": 2.05,
      "grad_norm": 0.05097561329603195,
      "learning_rate": 1.7592592592592595e-05,
      "loss": 0.0047,
      "step": 205
    },
    {
      "epoch": 2.1,
      "grad_norm": 0.04888813570141792,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.0046,
      "step": 210
    },
    {
      "epoch": 2.15,
      "grad_norm": 0.04953450709581375,
      "learning_rate": 1.574074074074074e-05,
      "loss": 0.0047,
      "step": 215
    },
    {
      "epoch": 2.2,
      "grad_norm": 0.05090043693780899,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.0047,
      "step": 220
    },
    {
      "epoch": 2.25,
      "grad_norm": 0.04786603897809982,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.0044,
      "step": 225
    },
    {
      "epoch": 2.3,
      "grad_norm": 0.046088654547929764,
      "learning_rate": 1.2962962962962962e-05,
      "loss": 0.0042,
      "step": 230
    },
    {
      "epoch": 2.35,
      "grad_norm": 0.04463566094636917,
      "learning_rate": 1.2037037037037037e-05,
      "loss": 0.0041,
      "step": 235
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.04407651722431183,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.0041,
      "step": 240
    },
    {
      "epoch": 2.45,
      "grad_norm": 0.04530609771609306,
      "learning_rate": 1.0185185185185185e-05,
      "loss": 0.0043,
      "step": 245
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.0432138666510582,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.004,
      "step": 250
    },
    {
      "epoch": 2.55,
      "grad_norm": 0.04460172727704048,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.0041,
      "step": 255
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.04314126819372177,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.0039,
      "step": 260
    },
    {
      "epoch": 2.65,
      "grad_norm": 0.04184052720665932,
      "learning_rate": 6.481481481481481e-06,
      "loss": 0.004,
      "step": 265
    },
    {
      "epoch": 2.7,
      "grad_norm": 0.04184694215655327,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.0037,
      "step": 270
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.04550705850124359,
      "learning_rate": 4.6296296296296296e-06,
      "loss": 0.0039,
      "step": 275
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.04213162884116173,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 0.0039,
      "step": 280
    },
    {
      "epoch": 2.85,
      "grad_norm": 0.0391254797577858,
      "learning_rate": 2.777777777777778e-06,
      "loss": 0.0037,
      "step": 285
    },
    {
      "epoch": 2.9,
      "grad_norm": 0.03980780765414238,
      "learning_rate": 1.8518518518518519e-06,
      "loss": 0.0039,
      "step": 290
    },
    {
      "epoch": 2.95,
      "grad_norm": 0.040770865976810455,
      "learning_rate": 9.259259259259259e-07,
      "loss": 0.0039,
      "step": 295
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.042418189346790314,
      "learning_rate": 0.0,
      "loss": 0.0038,
      "step": 300
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 1.0,
      "eval_auc": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.0028607482090592384,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 35.7094,
      "eval_samples_per_second": 5.601,
      "eval_steps_per_second": 0.364,
      "step": 300
    }
  ],
  "logging_steps": 5,
  "max_steps": 300,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 2
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 157866633216000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}