{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 5628,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.053304904051172705,
      "grad_norm": 2.3143041133880615,
      "learning_rate": 9.900000000000002e-06,
      "loss": 0.6637,
      "step": 100
    },
    {
      "epoch": 0.10660980810234541,
      "grad_norm": 6.371345043182373,
      "learning_rate": 1.9900000000000003e-05,
      "loss": 0.505,
      "step": 200
    },
    {
      "epoch": 0.15991471215351813,
      "grad_norm": 4.748474597930908,
      "learning_rate": 2.9900000000000002e-05,
      "loss": 0.4336,
      "step": 300
    },
    {
      "epoch": 0.21321961620469082,
      "grad_norm": 4.305647373199463,
      "learning_rate": 3.99e-05,
      "loss": 0.4183,
      "step": 400
    },
    {
      "epoch": 0.26652452025586354,
      "grad_norm": 4.751767635345459,
      "learning_rate": 4.99e-05,
      "loss": 0.4119,
      "step": 500
    },
    {
      "epoch": 0.31982942430703626,
      "grad_norm": 2.5453217029571533,
      "learning_rate": 4.903471138845554e-05,
      "loss": 0.421,
      "step": 600
    },
    {
      "epoch": 0.373134328358209,
      "grad_norm": 2.3192925453186035,
      "learning_rate": 4.805967238689548e-05,
      "loss": 0.423,
      "step": 700
    },
    {
      "epoch": 0.42643923240938164,
      "grad_norm": 2.352053642272949,
      "learning_rate": 4.708463338533542e-05,
      "loss": 0.4051,
      "step": 800
    },
    {
      "epoch": 0.47974413646055436,
      "grad_norm": 3.065234661102295,
      "learning_rate": 4.610959438377535e-05,
      "loss": 0.3808,
      "step": 900
    },
    {
      "epoch": 0.5330490405117271,
      "grad_norm": 3.259699583053589,
      "learning_rate": 4.513455538221529e-05,
      "loss": 0.4178,
      "step": 1000
    },
    {
      "epoch": 0.5863539445628998,
      "grad_norm": 5.711488723754883,
      "learning_rate": 4.415951638065523e-05,
      "loss": 0.3846,
      "step": 1100
    },
    {
      "epoch": 0.6396588486140725,
      "grad_norm": 3.5369982719421387,
      "learning_rate": 4.318447737909517e-05,
      "loss": 0.3724,
      "step": 1200
    },
    {
      "epoch": 0.6929637526652452,
      "grad_norm": 1.9052191972732544,
      "learning_rate": 4.220943837753511e-05,
      "loss": 0.3885,
      "step": 1300
    },
    {
      "epoch": 0.746268656716418,
      "grad_norm": 6.73522424697876,
      "learning_rate": 4.123439937597504e-05,
      "loss": 0.3776,
      "step": 1400
    },
    {
      "epoch": 0.7995735607675906,
      "grad_norm": 4.6753387451171875,
      "learning_rate": 4.025936037441498e-05,
      "loss": 0.3469,
      "step": 1500
    },
    {
      "epoch": 0.8528784648187633,
      "grad_norm": 3.484023094177246,
      "learning_rate": 3.9284321372854915e-05,
      "loss": 0.39,
      "step": 1600
    },
    {
      "epoch": 0.906183368869936,
      "grad_norm": 3.9286551475524902,
      "learning_rate": 3.830928237129485e-05,
      "loss": 0.371,
      "step": 1700
    },
    {
      "epoch": 0.9594882729211087,
      "grad_norm": 4.431338310241699,
      "learning_rate": 3.733424336973479e-05,
      "loss": 0.3573,
      "step": 1800
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8105906313645621,
      "eval_confusion_matrix": [
        [
          609,
          296
        ],
        [
          76,
          983
        ]
      ],
      "eval_f1": 0.8063983144212725,
      "eval_loss": 0.4077329933643341,
      "eval_precision": 0.8240865672329818,
      "eval_recall": 0.8105906313645621,
      "eval_runtime": 37.6119,
      "eval_samples_per_second": 52.218,
      "eval_steps_per_second": 3.27,
      "step": 1876
    },
    {
      "epoch": 1.0127931769722816,
      "grad_norm": 0.8932910561561584,
      "learning_rate": 3.635920436817473e-05,
      "loss": 0.3484,
      "step": 1900
    },
    {
      "epoch": 1.0660980810234542,
      "grad_norm": 4.019241809844971,
      "learning_rate": 3.538416536661467e-05,
      "loss": 0.2809,
      "step": 2000
    },
    {
      "epoch": 1.1194029850746268,
      "grad_norm": 7.529379367828369,
      "learning_rate": 3.4409126365054605e-05,
      "loss": 0.2562,
      "step": 2100
    },
    {
      "epoch": 1.1727078891257996,
      "grad_norm": 2.790593147277832,
      "learning_rate": 3.343408736349454e-05,
      "loss": 0.2552,
      "step": 2200
    },
    {
      "epoch": 1.2260127931769722,
      "grad_norm": 1.8328346014022827,
      "learning_rate": 3.245904836193448e-05,
      "loss": 0.2582,
      "step": 2300
    },
    {
      "epoch": 1.279317697228145,
      "grad_norm": 7.493434429168701,
      "learning_rate": 3.148400936037442e-05,
      "loss": 0.246,
      "step": 2400
    },
    {
      "epoch": 1.3326226012793176,
      "grad_norm": 6.164539813995361,
      "learning_rate": 3.0508970358814354e-05,
      "loss": 0.2494,
      "step": 2500
    },
    {
      "epoch": 1.3859275053304905,
      "grad_norm": 4.088465690612793,
      "learning_rate": 2.9533931357254292e-05,
      "loss": 0.2625,
      "step": 2600
    },
    {
      "epoch": 1.439232409381663,
      "grad_norm": 3.8281686305999756,
      "learning_rate": 2.8558892355694227e-05,
      "loss": 0.2886,
      "step": 2700
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 5.0964226722717285,
      "learning_rate": 2.7583853354134165e-05,
      "loss": 0.2443,
      "step": 2800
    },
    {
      "epoch": 1.5458422174840085,
      "grad_norm": 4.0434370040893555,
      "learning_rate": 2.6608814352574103e-05,
      "loss": 0.244,
      "step": 2900
    },
    {
      "epoch": 1.5991471215351813,
      "grad_norm": 1.8725121021270752,
      "learning_rate": 2.5633775351014045e-05,
      "loss": 0.2589,
      "step": 3000
    },
    {
      "epoch": 1.652452025586354,
      "grad_norm": 4.799729824066162,
      "learning_rate": 2.465873634945398e-05,
      "loss": 0.2568,
      "step": 3100
    },
    {
      "epoch": 1.7057569296375266,
      "grad_norm": 6.433875560760498,
      "learning_rate": 2.3683697347893914e-05,
      "loss": 0.2319,
      "step": 3200
    },
    {
      "epoch": 1.7590618336886994,
      "grad_norm": 5.887631893157959,
      "learning_rate": 2.2708658346333856e-05,
      "loss": 0.2581,
      "step": 3300
    },
    {
      "epoch": 1.8123667377398722,
      "grad_norm": 4.89068078994751,
      "learning_rate": 2.1733619344773794e-05,
      "loss": 0.234,
      "step": 3400
    },
    {
      "epoch": 1.8656716417910446,
      "grad_norm": 8.370274543762207,
      "learning_rate": 2.075858034321373e-05,
      "loss": 0.2487,
      "step": 3500
    },
    {
      "epoch": 1.9189765458422174,
      "grad_norm": 4.509988307952881,
      "learning_rate": 1.9783541341653667e-05,
      "loss": 0.2591,
      "step": 3600
    },
    {
      "epoch": 1.9722814498933903,
      "grad_norm": 3.838970184326172,
      "learning_rate": 1.8808502340093605e-05,
      "loss": 0.2392,
      "step": 3700
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8411405295315683,
      "eval_confusion_matrix": [
        [
          706,
          199
        ],
        [
          113,
          946
        ]
      ],
      "eval_f1": 0.8402776027420387,
      "eval_loss": 0.44446203112602234,
      "eval_precision": 0.8427092846360184,
      "eval_recall": 0.8411405295315683,
      "eval_runtime": 36.7709,
      "eval_samples_per_second": 53.412,
      "eval_steps_per_second": 3.345,
      "step": 3752
    },
    {
      "epoch": 2.025586353944563,
      "grad_norm": 7.447359085083008,
      "learning_rate": 1.7833463338533543e-05,
      "loss": 0.1882,
      "step": 3800
    },
    {
      "epoch": 2.0788912579957355,
      "grad_norm": 0.26925402879714966,
      "learning_rate": 1.685842433697348e-05,
      "loss": 0.1558,
      "step": 3900
    },
    {
      "epoch": 2.1321961620469083,
      "grad_norm": 4.837244510650635,
      "learning_rate": 1.5883385335413416e-05,
      "loss": 0.1547,
      "step": 4000
    },
    {
      "epoch": 2.185501066098081,
      "grad_norm": 0.8875559568405151,
      "learning_rate": 1.4908346333853354e-05,
      "loss": 0.161,
      "step": 4100
    },
    {
      "epoch": 2.2388059701492535,
      "grad_norm": 4.033316612243652,
      "learning_rate": 1.3933307332293292e-05,
      "loss": 0.1724,
      "step": 4200
    },
    {
      "epoch": 2.2921108742004264,
      "grad_norm": 15.54712200164795,
      "learning_rate": 1.2958268330733232e-05,
      "loss": 0.1586,
      "step": 4300
    },
    {
      "epoch": 2.345415778251599,
      "grad_norm": 12.946330070495605,
      "learning_rate": 1.1983229329173168e-05,
      "loss": 0.161,
      "step": 4400
    },
    {
      "epoch": 2.398720682302772,
      "grad_norm": 3.7774710655212402,
      "learning_rate": 1.1008190327613105e-05,
      "loss": 0.1489,
      "step": 4500
    },
    {
      "epoch": 2.4520255863539444,
      "grad_norm": 9.51603889465332,
      "learning_rate": 1.0033151326053043e-05,
      "loss": 0.152,
      "step": 4600
    },
    {
      "epoch": 2.5053304904051172,
      "grad_norm": 0.13294453918933868,
      "learning_rate": 9.05811232449298e-06,
      "loss": 0.1512,
      "step": 4700
    },
    {
      "epoch": 2.55863539445629,
      "grad_norm": 2.5648090839385986,
      "learning_rate": 8.083073322932917e-06,
      "loss": 0.1647,
      "step": 4800
    },
    {
      "epoch": 2.611940298507463,
      "grad_norm": 3.1670095920562744,
      "learning_rate": 7.108034321372855e-06,
      "loss": 0.1397,
      "step": 4900
    },
    {
      "epoch": 2.6652452025586353,
      "grad_norm": 0.7597922086715698,
      "learning_rate": 6.1329953198127925e-06,
      "loss": 0.1567,
      "step": 5000
    },
    {
      "epoch": 2.718550106609808,
      "grad_norm": 24.864763259887695,
      "learning_rate": 5.157956318252731e-06,
      "loss": 0.1653,
      "step": 5100
    },
    {
      "epoch": 2.771855010660981,
      "grad_norm": 11.073485374450684,
      "learning_rate": 4.182917316692668e-06,
      "loss": 0.1287,
      "step": 5200
    },
    {
      "epoch": 2.8251599147121533,
      "grad_norm": 0.2030704915523529,
      "learning_rate": 3.2078783151326056e-06,
      "loss": 0.128,
      "step": 5300
    },
    {
      "epoch": 2.878464818763326,
      "grad_norm": 4.208672523498535,
      "learning_rate": 2.232839313572543e-06,
      "loss": 0.1525,
      "step": 5400
    },
    {
      "epoch": 2.931769722814499,
      "grad_norm": 13.611940383911133,
      "learning_rate": 1.2578003120124805e-06,
      "loss": 0.1539,
      "step": 5500
    },
    {
      "epoch": 2.9850746268656714,
      "grad_norm": 12.76778507232666,
      "learning_rate": 2.827613104524181e-07,
      "loss": 0.1568,
      "step": 5600
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8441955193482689,
      "eval_confusion_matrix": [
        [
          738,
          167
        ],
        [
          139,
          920
        ]
      ],
      "eval_f1": 0.8439878980296145,
      "eval_loss": 0.5926053524017334,
      "eval_precision": 0.8441262177198018,
      "eval_recall": 0.8441955193482689,
      "eval_runtime": 36.4848,
      "eval_samples_per_second": 53.831,
      "eval_steps_per_second": 3.371,
      "step": 5628
    }
  ],
  "logging_steps": 100,
  "max_steps": 5628,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1924847694411776e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}