{
  "best_metric": 0.2747629944426283,
  "best_model_checkpoint": "debertalarge-medical-classifier/checkpoint-366",
  "epoch": 25.0,
  "eval_steps": 500,
  "global_step": 4575,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.546448087431694,
      "grad_norm": 4.420255661010742,
      "learning_rate": 4.890710382513661e-05,
      "loss": 1.0582,
      "step": 100
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.1245394252026529,
      "eval_loss": 1.1087555885314941,
      "eval_runtime": 13.0214,
      "eval_samples_per_second": 7.065,
      "eval_steps_per_second": 3.533,
      "step": 183
    },
    {
      "epoch": 1.092896174863388,
      "grad_norm": 3.7559633255004883,
      "learning_rate": 4.7814207650273224e-05,
      "loss": 1.0895,
      "step": 200
    },
    {
      "epoch": 1.639344262295082,
      "grad_norm": 10.271145820617676,
      "learning_rate": 4.672131147540984e-05,
      "loss": 1.1036,
      "step": 300
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.123813271522522,
      "eval_runtime": 13.0611,
      "eval_samples_per_second": 7.044,
      "eval_steps_per_second": 3.522,
      "step": 366
    },
    {
      "epoch": 2.185792349726776,
      "grad_norm": 2.9179580211639404,
      "learning_rate": 4.562841530054645e-05,
      "loss": 1.077,
      "step": 400
    },
    {
      "epoch": 2.73224043715847,
      "grad_norm": 2.744880199432373,
      "learning_rate": 4.453551912568306e-05,
      "loss": 1.086,
      "step": 500
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1455310583114624,
      "eval_runtime": 13.0518,
      "eval_samples_per_second": 7.049,
      "eval_steps_per_second": 3.524,
      "step": 549
    },
    {
      "epoch": 3.278688524590164,
      "grad_norm": 2.7470598220825195,
      "learning_rate": 4.3442622950819674e-05,
      "loss": 1.0524,
      "step": 600
    },
    {
      "epoch": 3.8251366120218577,
      "grad_norm": 2.554631233215332,
      "learning_rate": 4.234972677595629e-05,
      "loss": 1.073,
      "step": 700
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1285173892974854,
      "eval_runtime": 13.0619,
      "eval_samples_per_second": 7.043,
      "eval_steps_per_second": 3.522,
      "step": 732
    },
    {
      "epoch": 4.371584699453552,
      "grad_norm": 3.6401052474975586,
      "learning_rate": 4.12568306010929e-05,
      "loss": 1.0741,
      "step": 800
    },
    {
      "epoch": 4.918032786885246,
      "grad_norm": 5.5951972007751465,
      "learning_rate": 4.016393442622951e-05,
      "loss": 1.0916,
      "step": 900
    },
    {
      "epoch": 5.0,
      "eval_f1": 0.1245394252026529,
      "eval_loss": 1.142012357711792,
      "eval_runtime": 13.0456,
      "eval_samples_per_second": 7.052,
      "eval_steps_per_second": 3.526,
      "step": 915
    },
    {
      "epoch": 5.46448087431694,
      "grad_norm": 4.914717674255371,
      "learning_rate": 3.9071038251366124e-05,
      "loss": 1.0869,
      "step": 1000
    },
    {
      "epoch": 6.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1109999418258667,
      "eval_runtime": 13.0453,
      "eval_samples_per_second": 7.052,
      "eval_steps_per_second": 3.526,
      "step": 1098
    },
    {
      "epoch": 6.0109289617486334,
      "grad_norm": 5.487387180328369,
      "learning_rate": 3.797814207650273e-05,
      "loss": 1.0568,
      "step": 1100
    },
    {
      "epoch": 6.557377049180328,
      "grad_norm": 4.660529136657715,
      "learning_rate": 3.6885245901639346e-05,
      "loss": 1.0621,
      "step": 1200
    },
    {
      "epoch": 7.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1170544624328613,
      "eval_runtime": 13.0506,
      "eval_samples_per_second": 7.049,
      "eval_steps_per_second": 3.525,
      "step": 1281
    },
    {
      "epoch": 7.103825136612022,
      "grad_norm": 2.940361499786377,
      "learning_rate": 3.579234972677596e-05,
      "loss": 1.1085,
      "step": 1300
    },
    {
      "epoch": 7.6502732240437155,
      "grad_norm": 5.258467674255371,
      "learning_rate": 3.469945355191257e-05,
      "loss": 1.0989,
      "step": 1400
    },
    {
      "epoch": 8.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.0947765111923218,
      "eval_runtime": 13.0622,
      "eval_samples_per_second": 7.043,
      "eval_steps_per_second": 3.522,
      "step": 1464
    },
    {
      "epoch": 8.19672131147541,
      "grad_norm": 2.7967240810394287,
      "learning_rate": 3.360655737704918e-05,
      "loss": 1.0848,
      "step": 1500
    },
    {
      "epoch": 8.743169398907105,
      "grad_norm": 5.457353115081787,
      "learning_rate": 3.251366120218579e-05,
      "loss": 1.0525,
      "step": 1600
    },
    {
      "epoch": 9.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1294028759002686,
      "eval_runtime": 13.0615,
      "eval_samples_per_second": 7.044,
      "eval_steps_per_second": 3.522,
      "step": 1647
    },
    {
      "epoch": 9.289617486338798,
      "grad_norm": 2.8257970809936523,
      "learning_rate": 3.142076502732241e-05,
      "loss": 0.9989,
      "step": 1700
    },
    {
      "epoch": 9.836065573770492,
      "grad_norm": 4.843544960021973,
      "learning_rate": 3.0327868852459017e-05,
      "loss": 1.1366,
      "step": 1800
    },
    {
      "epoch": 10.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1037259101867676,
      "eval_runtime": 13.0719,
      "eval_samples_per_second": 7.038,
      "eval_steps_per_second": 3.519,
      "step": 1830
    },
    {
      "epoch": 10.382513661202186,
      "grad_norm": 2.3387579917907715,
      "learning_rate": 2.9234972677595628e-05,
      "loss": 1.0349,
      "step": 1900
    },
    {
      "epoch": 10.92896174863388,
      "grad_norm": 4.286855697631836,
      "learning_rate": 2.814207650273224e-05,
      "loss": 1.0849,
      "step": 2000
    },
    {
      "epoch": 11.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.093202829360962,
      "eval_runtime": 13.0444,
      "eval_samples_per_second": 7.053,
      "eval_steps_per_second": 3.526,
      "step": 2013
    },
    {
      "epoch": 11.475409836065573,
      "grad_norm": 2.8780972957611084,
      "learning_rate": 2.7049180327868856e-05,
      "loss": 1.0844,
      "step": 2100
    },
    {
      "epoch": 12.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.11479651927948,
      "eval_runtime": 13.056,
      "eval_samples_per_second": 7.047,
      "eval_steps_per_second": 3.523,
      "step": 2196
    },
    {
      "epoch": 12.021857923497267,
      "grad_norm": 4.95583963394165,
      "learning_rate": 2.5956284153005467e-05,
      "loss": 1.0431,
      "step": 2200
    },
    {
      "epoch": 12.568306010928962,
      "grad_norm": 6.593821048736572,
      "learning_rate": 2.4863387978142078e-05,
      "loss": 1.0925,
      "step": 2300
    },
    {
      "epoch": 13.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1049476861953735,
      "eval_runtime": 13.0607,
      "eval_samples_per_second": 7.044,
      "eval_steps_per_second": 3.522,
      "step": 2379
    },
    {
      "epoch": 13.114754098360656,
      "grad_norm": 2.422586441040039,
      "learning_rate": 2.377049180327869e-05,
      "loss": 1.0433,
      "step": 2400
    },
    {
      "epoch": 13.66120218579235,
      "grad_norm": 2.4954915046691895,
      "learning_rate": 2.2677595628415303e-05,
      "loss": 1.09,
      "step": 2500
    },
    {
      "epoch": 14.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.110988974571228,
      "eval_runtime": 13.0548,
      "eval_samples_per_second": 7.047,
      "eval_steps_per_second": 3.524,
      "step": 2562
    },
    {
      "epoch": 14.207650273224044,
      "grad_norm": 7.657999038696289,
      "learning_rate": 2.1584699453551914e-05,
      "loss": 1.0834,
      "step": 2600
    },
    {
      "epoch": 14.754098360655737,
      "grad_norm": 6.67676305770874,
      "learning_rate": 2.0491803278688525e-05,
      "loss": 1.0739,
      "step": 2700
    },
    {
      "epoch": 15.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1128904819488525,
      "eval_runtime": 13.055,
      "eval_samples_per_second": 7.047,
      "eval_steps_per_second": 3.524,
      "step": 2745
    },
    {
      "epoch": 15.300546448087431,
      "grad_norm": 4.447215557098389,
      "learning_rate": 1.9398907103825135e-05,
      "loss": 1.0086,
      "step": 2800
    },
    {
      "epoch": 15.846994535519126,
      "grad_norm": 3.9617857933044434,
      "learning_rate": 1.830601092896175e-05,
      "loss": 1.0938,
      "step": 2900
    },
    {
      "epoch": 16.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1148039102554321,
      "eval_runtime": 13.0531,
      "eval_samples_per_second": 7.048,
      "eval_steps_per_second": 3.524,
      "step": 2928
    },
    {
      "epoch": 16.39344262295082,
      "grad_norm": 4.644863605499268,
      "learning_rate": 1.721311475409836e-05,
      "loss": 1.021,
      "step": 3000
    },
    {
      "epoch": 16.939890710382514,
      "grad_norm": 4.022730827331543,
      "learning_rate": 1.6120218579234975e-05,
      "loss": 1.0961,
      "step": 3100
    },
    {
      "epoch": 17.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.0970044136047363,
      "eval_runtime": 13.0521,
      "eval_samples_per_second": 7.049,
      "eval_steps_per_second": 3.524,
      "step": 3111
    },
    {
      "epoch": 17.48633879781421,
      "grad_norm": 5.2359089851379395,
      "learning_rate": 1.5027322404371585e-05,
      "loss": 1.0834,
      "step": 3200
    },
    {
      "epoch": 18.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.106469988822937,
      "eval_runtime": 13.0607,
      "eval_samples_per_second": 7.044,
      "eval_steps_per_second": 3.522,
      "step": 3294
    },
    {
      "epoch": 18.0327868852459,
      "grad_norm": 4.7237467765808105,
      "learning_rate": 1.3934426229508196e-05,
      "loss": 1.036,
      "step": 3300
    },
    {
      "epoch": 18.579234972677597,
      "grad_norm": 2.9745349884033203,
      "learning_rate": 1.284153005464481e-05,
      "loss": 1.0885,
      "step": 3400
    },
    {
      "epoch": 19.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1148598194122314,
      "eval_runtime": 13.089,
      "eval_samples_per_second": 7.029,
      "eval_steps_per_second": 3.514,
      "step": 3477
    },
    {
      "epoch": 19.12568306010929,
      "grad_norm": 4.162548542022705,
      "learning_rate": 1.1748633879781421e-05,
      "loss": 1.0579,
      "step": 3500
    },
    {
      "epoch": 19.672131147540984,
      "grad_norm": 4.825162410736084,
      "learning_rate": 1.0655737704918032e-05,
      "loss": 1.0789,
      "step": 3600
    },
    {
      "epoch": 20.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1134560108184814,
      "eval_runtime": 13.0842,
      "eval_samples_per_second": 7.031,
      "eval_steps_per_second": 3.516,
      "step": 3660
    },
    {
      "epoch": 20.218579234972676,
      "grad_norm": 6.329209327697754,
      "learning_rate": 9.562841530054644e-06,
      "loss": 1.0009,
      "step": 3700
    },
    {
      "epoch": 20.76502732240437,
      "grad_norm": 7.754738807678223,
      "learning_rate": 8.469945355191257e-06,
      "loss": 1.0893,
      "step": 3800
    },
    {
      "epoch": 21.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1135228872299194,
      "eval_runtime": 13.0668,
      "eval_samples_per_second": 7.041,
      "eval_steps_per_second": 3.52,
      "step": 3843
    },
    {
      "epoch": 21.311475409836067,
      "grad_norm": 5.315028190612793,
      "learning_rate": 7.3770491803278695e-06,
      "loss": 1.0579,
      "step": 3900
    },
    {
      "epoch": 21.85792349726776,
      "grad_norm": 5.136527061462402,
      "learning_rate": 6.284153005464481e-06,
      "loss": 1.0686,
      "step": 4000
    },
    {
      "epoch": 22.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1142159700393677,
      "eval_runtime": 13.0603,
      "eval_samples_per_second": 7.044,
      "eval_steps_per_second": 3.522,
      "step": 4026
    },
    {
      "epoch": 22.404371584699454,
      "grad_norm": 2.6005687713623047,
      "learning_rate": 5.191256830601094e-06,
      "loss": 1.0862,
      "step": 4100
    },
    {
      "epoch": 22.950819672131146,
      "grad_norm": 4.929677963256836,
      "learning_rate": 4.098360655737704e-06,
      "loss": 1.041,
      "step": 4200
    },
    {
      "epoch": 23.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1108310222625732,
      "eval_runtime": 13.056,
      "eval_samples_per_second": 7.047,
      "eval_steps_per_second": 3.523,
      "step": 4209
    },
    {
      "epoch": 23.497267759562842,
      "grad_norm": 4.816049098968506,
      "learning_rate": 3.005464480874317e-06,
      "loss": 1.0524,
      "step": 4300
    },
    {
      "epoch": 24.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1108192205429077,
      "eval_runtime": 13.0475,
      "eval_samples_per_second": 7.051,
      "eval_steps_per_second": 3.526,
      "step": 4392
    },
    {
      "epoch": 24.043715846994534,
      "grad_norm": 4.936235427856445,
      "learning_rate": 1.912568306010929e-06,
      "loss": 1.0767,
      "step": 4400
    },
    {
      "epoch": 24.59016393442623,
      "grad_norm": 4.7811665534973145,
      "learning_rate": 8.19672131147541e-07,
      "loss": 1.0675,
      "step": 4500
    },
    {
      "epoch": 25.0,
      "eval_f1": 0.2747629944426283,
      "eval_loss": 1.1084977388381958,
      "eval_runtime": 13.1143,
      "eval_samples_per_second": 7.015,
      "eval_steps_per_second": 3.508,
      "step": 4575
    }
  ],
  "logging_steps": 100,
  "max_steps": 4575,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 25,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.219233942225e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}