{
  "best_metric": 0.946677833612507,
  "best_model_checkpoint": "/tmp/classification_phobert-v2/checkpoint-2522",
  "epoch": 40.0,
  "eval_steps": 500,
  "global_step": 7760,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_f1": 0.9346733668341709,
      "eval_loss": 0.2288697063922882,
      "eval_runtime": 9.6999,
      "eval_samples_per_second": 369.281,
      "eval_steps_per_second": 2.887,
      "step": 194
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.9399776661083193,
      "eval_loss": 0.1812455654144287,
      "eval_runtime": 9.6897,
      "eval_samples_per_second": 369.672,
      "eval_steps_per_second": 2.89,
      "step": 388
    },
    {
      "epoch": 2.5773195876288657,
      "grad_norm": 3.7161166667938232,
      "learning_rate": 2.806701030927835e-05,
      "loss": 0.2405,
      "step": 500
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.9447236180904522,
      "eval_loss": 0.17744559049606323,
      "eval_runtime": 9.6898,
      "eval_samples_per_second": 369.666,
      "eval_steps_per_second": 2.89,
      "step": 582
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.9433277498604131,
      "eval_loss": 0.1997271031141281,
      "eval_runtime": 9.6886,
      "eval_samples_per_second": 369.711,
      "eval_steps_per_second": 2.89,
      "step": 776
    },
    {
      "epoch": 5.0,
      "eval_f1": 0.9427694025683976,
      "eval_loss": 0.22364477813243866,
      "eval_runtime": 9.704,
      "eval_samples_per_second": 369.128,
      "eval_steps_per_second": 2.885,
      "step": 970
    },
    {
      "epoch": 5.154639175257732,
      "grad_norm": 8.774168968200684,
      "learning_rate": 2.61340206185567e-05,
      "loss": 0.1112,
      "step": 1000
    },
    {
      "epoch": 6.0,
      "eval_f1": 0.9380234505862647,
      "eval_loss": 0.24477960169315338,
      "eval_runtime": 9.6887,
      "eval_samples_per_second": 369.71,
      "eval_steps_per_second": 2.89,
      "step": 1164
    },
    {
      "epoch": 7.0,
      "eval_f1": 0.9441652707984366,
      "eval_loss": 0.22501157224178314,
      "eval_runtime": 9.6707,
      "eval_samples_per_second": 370.399,
      "eval_steps_per_second": 2.895,
      "step": 1358
    },
    {
      "epoch": 7.731958762886598,
      "grad_norm": 1.4358633756637573,
      "learning_rate": 2.4201030927835052e-05,
      "loss": 0.0717,
      "step": 1500
    },
    {
      "epoch": 8.0,
      "eval_f1": 0.9413735343383585,
      "eval_loss": 0.24096541106700897,
      "eval_runtime": 9.6976,
      "eval_samples_per_second": 369.369,
      "eval_steps_per_second": 2.887,
      "step": 1552
    },
    {
      "epoch": 9.0,
      "eval_f1": 0.9413735343383585,
      "eval_loss": 0.24880127608776093,
      "eval_runtime": 9.685,
      "eval_samples_per_second": 369.852,
      "eval_steps_per_second": 2.891,
      "step": 1746
    },
    {
      "epoch": 10.0,
      "eval_f1": 0.9447236180904522,
      "eval_loss": 0.26668497920036316,
      "eval_runtime": 9.6717,
      "eval_samples_per_second": 370.357,
      "eval_steps_per_second": 2.895,
      "step": 1940
    },
    {
      "epoch": 10.309278350515465,
      "grad_norm": 1.2549242973327637,
      "learning_rate": 2.2268041237113402e-05,
      "loss": 0.0525,
      "step": 2000
    },
    {
      "epoch": 11.0,
      "eval_f1": 0.9455611390284757,
      "eval_loss": 0.26830339431762695,
      "eval_runtime": 9.6812,
      "eval_samples_per_second": 369.996,
      "eval_steps_per_second": 2.892,
      "step": 2134
    },
    {
      "epoch": 12.0,
      "eval_f1": 0.9413735343383585,
      "eval_loss": 0.3144669532775879,
      "eval_runtime": 9.668,
      "eval_samples_per_second": 370.501,
      "eval_steps_per_second": 2.896,
      "step": 2328
    },
    {
      "epoch": 12.88659793814433,
      "grad_norm": 0.8762478232383728,
      "learning_rate": 2.0335051546391752e-05,
      "loss": 0.0402,
      "step": 2500
    },
    {
      "epoch": 13.0,
      "eval_f1": 0.946677833612507,
      "eval_loss": 0.2749080955982208,
      "eval_runtime": 9.685,
      "eval_samples_per_second": 369.852,
      "eval_steps_per_second": 2.891,
      "step": 2522
    },
    {
      "epoch": 14.0,
      "eval_f1": 0.9430485762144054,
      "eval_loss": 0.3029841482639313,
      "eval_runtime": 9.6934,
      "eval_samples_per_second": 369.531,
      "eval_steps_per_second": 2.889,
      "step": 2716
    },
    {
      "epoch": 15.0,
      "eval_f1": 0.9458403126744835,
      "eval_loss": 0.3058845102787018,
      "eval_runtime": 9.6767,
      "eval_samples_per_second": 370.168,
      "eval_steps_per_second": 2.894,
      "step": 2910
    },
    {
      "epoch": 15.463917525773196,
      "grad_norm": 0.3448758125305176,
      "learning_rate": 1.8402061855670106e-05,
      "loss": 0.0285,
      "step": 3000
    },
    {
      "epoch": 16.0,
      "eval_f1": 0.9424902289223898,
      "eval_loss": 0.3259759247303009,
      "eval_runtime": 9.6809,
      "eval_samples_per_second": 370.008,
      "eval_steps_per_second": 2.892,
      "step": 3104
    },
    {
      "epoch": 17.0,
      "eval_f1": 0.9463986599664992,
      "eval_loss": 0.32083001732826233,
      "eval_runtime": 9.6806,
      "eval_samples_per_second": 370.018,
      "eval_steps_per_second": 2.892,
      "step": 3298
    },
    {
      "epoch": 18.0,
      "eval_f1": 0.9394193188163037,
      "eval_loss": 0.3752443492412567,
      "eval_runtime": 9.6833,
      "eval_samples_per_second": 369.914,
      "eval_steps_per_second": 2.892,
      "step": 3492
    },
    {
      "epoch": 18.04123711340206,
      "grad_norm": 0.16492746770381927,
      "learning_rate": 1.6469072164948456e-05,
      "loss": 0.0225,
      "step": 3500
    },
    {
      "epoch": 19.0,
      "eval_f1": 0.9450027917364601,
      "eval_loss": 0.3408117890357971,
      "eval_runtime": 9.6821,
      "eval_samples_per_second": 369.963,
      "eval_steps_per_second": 2.892,
      "step": 3686
    },
    {
      "epoch": 20.0,
      "eval_f1": 0.9366275823562256,
      "eval_loss": 0.41283130645751953,
      "eval_runtime": 9.6877,
      "eval_samples_per_second": 369.748,
      "eval_steps_per_second": 2.89,
      "step": 3880
    },
    {
      "epoch": 20.61855670103093,
      "grad_norm": 0.5357736945152283,
      "learning_rate": 1.4536082474226805e-05,
      "loss": 0.0166,
      "step": 4000
    },
    {
      "epoch": 21.0,
      "eval_f1": 0.9408151870463428,
      "eval_loss": 0.37989336252212524,
      "eval_runtime": 9.6608,
      "eval_samples_per_second": 370.777,
      "eval_steps_per_second": 2.898,
      "step": 4074
    },
    {
      "epoch": 22.0,
      "eval_f1": 0.9388609715242882,
      "eval_loss": 0.3940359354019165,
      "eval_runtime": 9.6719,
      "eval_samples_per_second": 370.351,
      "eval_steps_per_second": 2.895,
      "step": 4268
    },
    {
      "epoch": 23.0,
      "eval_f1": 0.9450027917364601,
      "eval_loss": 0.37402409315109253,
      "eval_runtime": 9.6886,
      "eval_samples_per_second": 369.713,
      "eval_steps_per_second": 2.89,
      "step": 4462
    },
    {
      "epoch": 23.195876288659793,
      "grad_norm": 0.6457699537277222,
      "learning_rate": 1.2603092783505155e-05,
      "loss": 0.0153,
      "step": 4500
    },
    {
      "epoch": 24.0,
      "eval_f1": 0.9399776661083193,
      "eval_loss": 0.3809983432292938,
      "eval_runtime": 9.6663,
      "eval_samples_per_second": 370.567,
      "eval_steps_per_second": 2.897,
      "step": 4656
    },
    {
      "epoch": 25.0,
      "eval_f1": 0.9402568397543272,
      "eval_loss": 0.4277588427066803,
      "eval_runtime": 9.6786,
      "eval_samples_per_second": 370.097,
      "eval_steps_per_second": 2.893,
      "step": 4850
    },
    {
      "epoch": 25.77319587628866,
      "grad_norm": 0.11396524310112,
      "learning_rate": 1.0670103092783506e-05,
      "loss": 0.0122,
      "step": 5000
    },
    {
      "epoch": 26.0,
      "eval_f1": 0.943606923506421,
      "eval_loss": 0.3878323435783386,
      "eval_runtime": 9.6823,
      "eval_samples_per_second": 369.953,
      "eval_steps_per_second": 2.892,
      "step": 5044
    },
    {
      "epoch": 27.0,
      "eval_f1": 0.9433277498604131,
      "eval_loss": 0.3903436064720154,
      "eval_runtime": 9.6664,
      "eval_samples_per_second": 370.561,
      "eval_steps_per_second": 2.897,
      "step": 5238
    },
    {
      "epoch": 28.0,
      "eval_f1": 0.9441652707984366,
      "eval_loss": 0.3903617858886719,
      "eval_runtime": 9.6852,
      "eval_samples_per_second": 369.843,
      "eval_steps_per_second": 2.891,
      "step": 5432
    },
    {
      "epoch": 28.350515463917525,
      "grad_norm": 0.06657901406288147,
      "learning_rate": 8.737113402061856e-06,
      "loss": 0.0114,
      "step": 5500
    },
    {
      "epoch": 29.0,
      "eval_f1": 0.9427694025683976,
      "eval_loss": 0.42051565647125244,
      "eval_runtime": 9.6905,
      "eval_samples_per_second": 369.642,
      "eval_steps_per_second": 2.889,
      "step": 5626
    },
    {
      "epoch": 30.0,
      "eval_f1": 0.9433277498604131,
      "eval_loss": 0.3968648612499237,
      "eval_runtime": 9.6628,
      "eval_samples_per_second": 370.701,
      "eval_steps_per_second": 2.898,
      "step": 5820
    },
    {
      "epoch": 30.927835051546392,
      "grad_norm": 1.0208468437194824,
      "learning_rate": 6.8041237113402065e-06,
      "loss": 0.0096,
      "step": 6000
    },
    {
      "epoch": 31.0,
      "eval_f1": 0.9438860971524288,
      "eval_loss": 0.39665865898132324,
      "eval_runtime": 9.6861,
      "eval_samples_per_second": 369.808,
      "eval_steps_per_second": 2.891,
      "step": 6014
    },
    {
      "epoch": 32.0,
      "eval_f1": 0.9441652707984366,
      "eval_loss": 0.40085485577583313,
      "eval_runtime": 9.6639,
      "eval_samples_per_second": 370.659,
      "eval_steps_per_second": 2.897,
      "step": 6208
    },
    {
      "epoch": 33.0,
      "eval_f1": 0.9438860971524288,
      "eval_loss": 0.405376136302948,
      "eval_runtime": 9.6828,
      "eval_samples_per_second": 369.935,
      "eval_steps_per_second": 2.892,
      "step": 6402
    },
    {
      "epoch": 33.50515463917526,
      "grad_norm": 0.0191043633967638,
      "learning_rate": 4.871134020618557e-06,
      "loss": 0.0082,
      "step": 6500
    },
    {
      "epoch": 34.0,
      "eval_f1": 0.9422110552763819,
      "eval_loss": 0.4114643335342407,
      "eval_runtime": 9.6768,
      "eval_samples_per_second": 370.163,
      "eval_steps_per_second": 2.894,
      "step": 6596
    },
    {
      "epoch": 35.0,
      "eval_f1": 0.9430485762144054,
      "eval_loss": 0.42280834913253784,
      "eval_runtime": 9.6847,
      "eval_samples_per_second": 369.862,
      "eval_steps_per_second": 2.891,
      "step": 6790
    },
    {
      "epoch": 36.0,
      "eval_f1": 0.9441652707984366,
      "eval_loss": 0.4165395200252533,
      "eval_runtime": 9.6675,
      "eval_samples_per_second": 370.522,
      "eval_steps_per_second": 2.896,
      "step": 6984
    },
    {
      "epoch": 36.08247422680412,
      "grad_norm": 1.23173987865448,
      "learning_rate": 2.938144329896907e-06,
      "loss": 0.0083,
      "step": 7000
    },
    {
      "epoch": 37.0,
      "eval_f1": 0.943606923506421,
      "eval_loss": 0.4226440489292145,
      "eval_runtime": 9.6782,
      "eval_samples_per_second": 370.111,
      "eval_steps_per_second": 2.893,
      "step": 7178
    },
    {
      "epoch": 38.0,
      "eval_f1": 0.9430485762144054,
      "eval_loss": 0.4262131452560425,
      "eval_runtime": 9.6774,
      "eval_samples_per_second": 370.141,
      "eval_steps_per_second": 2.893,
      "step": 7372
    },
    {
      "epoch": 38.65979381443299,
      "grad_norm": 1.7488667964935303,
      "learning_rate": 1.0051546391752577e-06,
      "loss": 0.0071,
      "step": 7500
    },
    {
      "epoch": 39.0,
      "eval_f1": 0.943606923506421,
      "eval_loss": 0.42308905720710754,
      "eval_runtime": 9.6502,
      "eval_samples_per_second": 371.183,
      "eval_steps_per_second": 2.901,
      "step": 7566
    },
    {
      "epoch": 40.0,
      "eval_f1": 0.9438860971524288,
      "eval_loss": 0.4251147508621216,
      "eval_runtime": 9.6262,
      "eval_samples_per_second": 372.109,
      "eval_steps_per_second": 2.909,
      "step": 7760
    },
    {
      "epoch": 40.0,
      "step": 7760,
      "total_flos": 6.517582470540288e+16,
      "train_loss": 0.04250985115152044,
      "train_runtime": 8305.2138,
      "train_samples_per_second": 119.303,
      "train_steps_per_second": 0.934
    }
  ],
  "logging_steps": 500,
  "max_steps": 7760,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.517582470540288e+16,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}