{
  "best_metric": 0.7404754757881165,
  "best_model_checkpoint": "./twitter-xlmr-clip-finetuned-all-123/checkpoint-11500",
  "epoch": 3.1866281866281865,
  "eval_steps": 500,
  "global_step": 26500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "grad_norm": 13.600483894348145,
      "learning_rate": 4.994047619047619e-05,
      "loss": 0.6444,
      "step": 500
    },
    {
      "epoch": 0.06,
      "eval_f1": 0.41968345507991583,
      "eval_loss": 0.8771085143089294,
      "eval_precision": 0.6905442329947281,
      "eval_recall": 0.4537466834241028,
      "eval_runtime": 9.714,
      "eval_samples_per_second": 90.385,
      "eval_steps_per_second": 5.662,
      "step": 500
    },
    {
      "epoch": 0.12,
      "grad_norm": 3.1294376850128174,
      "learning_rate": 4.988035113035113e-05,
      "loss": 0.5499,
      "step": 1000
    },
    {
      "epoch": 0.12,
      "eval_f1": 0.41168390354058104,
      "eval_loss": 0.8166923522949219,
      "eval_precision": 0.7197164083396941,
      "eval_recall": 0.4260270911883815,
      "eval_runtime": 10.112,
      "eval_samples_per_second": 86.827,
      "eval_steps_per_second": 5.439,
      "step": 1000
    },
    {
      "epoch": 0.18,
      "grad_norm": 3.979219913482666,
      "learning_rate": 4.982022607022607e-05,
      "loss": 0.5357,
      "step": 1500
    },
    {
      "epoch": 0.18,
      "eval_f1": 0.44236857569210003,
      "eval_loss": 0.8084450364112854,
      "eval_precision": 0.7263357669536159,
      "eval_recall": 0.4695992179863147,
      "eval_runtime": 9.4795,
      "eval_samples_per_second": 92.621,
      "eval_steps_per_second": 5.802,
      "step": 1500
    },
    {
      "epoch": 0.24,
      "grad_norm": 4.673430442810059,
      "learning_rate": 4.9760341510341516e-05,
      "loss": 0.5175,
      "step": 2000
    },
    {
      "epoch": 0.24,
      "eval_f1": 0.37174117930428513,
      "eval_loss": 0.8703882098197937,
      "eval_precision": 0.6665612592670672,
      "eval_recall": 0.426614532420984,
      "eval_runtime": 10.0742,
      "eval_samples_per_second": 87.153,
      "eval_steps_per_second": 5.459,
      "step": 2000
    },
    {
      "epoch": 0.3,
      "grad_norm": 4.538591384887695,
      "learning_rate": 4.9700216450216456e-05,
      "loss": 0.5285,
      "step": 2500
    },
    {
      "epoch": 0.3,
      "eval_f1": 0.42208412407033724,
      "eval_loss": 0.906663179397583,
      "eval_precision": 0.7528800535330206,
      "eval_recall": 0.45646604291765575,
      "eval_runtime": 11.1647,
      "eval_samples_per_second": 78.641,
      "eval_steps_per_second": 4.926,
      "step": 2500
    },
    {
      "epoch": 0.36,
      "grad_norm": 3.0253360271453857,
      "learning_rate": 4.964009139009139e-05,
      "loss": 0.5081,
      "step": 3000
    },
    {
      "epoch": 0.36,
      "eval_f1": 0.6355885803741356,
      "eval_loss": 0.7414107322692871,
      "eval_precision": 0.7654892081053667,
      "eval_recall": 0.6113652655588139,
      "eval_runtime": 10.1707,
      "eval_samples_per_second": 86.326,
      "eval_steps_per_second": 5.408,
      "step": 3000
    },
    {
      "epoch": 0.42,
      "grad_norm": 7.686459541320801,
      "learning_rate": 4.957996632996633e-05,
      "loss": 0.506,
      "step": 3500
    },
    {
      "epoch": 0.42,
      "eval_f1": 0.5785601517423107,
      "eval_loss": 0.8712885975837708,
      "eval_precision": 0.5829865278619871,
      "eval_recall": 0.659145370758274,
      "eval_runtime": 9.4937,
      "eval_samples_per_second": 92.483,
      "eval_steps_per_second": 5.793,
      "step": 3500
    },
    {
      "epoch": 0.48,
      "grad_norm": 5.288177967071533,
      "learning_rate": 4.951984126984127e-05,
      "loss": 0.5049,
      "step": 4000
    },
    {
      "epoch": 0.48,
      "eval_f1": 0.4464146254881079,
      "eval_loss": 0.751436173915863,
      "eval_precision": 0.5550960735171261,
      "eval_recall": 0.4567546432062561,
      "eval_runtime": 10.1399,
      "eval_samples_per_second": 86.589,
      "eval_steps_per_second": 5.424,
      "step": 4000
    },
    {
      "epoch": 0.54,
      "grad_norm": 2.32623028755188,
      "learning_rate": 4.945971620971621e-05,
      "loss": 0.4999,
      "step": 4500
    },
    {
      "epoch": 0.54,
      "eval_f1": 0.5767450661581325,
      "eval_loss": 0.7584463357925415,
      "eval_precision": 0.6519354622940794,
      "eval_recall": 0.5502052785923753,
      "eval_runtime": 9.964,
      "eval_samples_per_second": 88.117,
      "eval_steps_per_second": 5.52,
      "step": 4500
    },
    {
      "epoch": 0.6,
      "grad_norm": 8.978556632995605,
      "learning_rate": 4.939959114959115e-05,
      "loss": 0.507,
      "step": 5000
    },
    {
      "epoch": 0.6,
      "eval_f1": 0.5635568884134308,
      "eval_loss": 0.8071982860565186,
      "eval_precision": 0.6478587943382395,
      "eval_recall": 0.5625815761299632,
      "eval_runtime": 9.922,
      "eval_samples_per_second": 88.49,
      "eval_steps_per_second": 5.543,
      "step": 5000
    },
    {
      "epoch": 0.66,
      "grad_norm": 5.773237228393555,
      "learning_rate": 4.933946608946609e-05,
      "loss": 0.5048,
      "step": 5500
    },
    {
      "epoch": 0.66,
      "eval_f1": 0.5730277716509274,
      "eval_loss": 0.8080196380615234,
      "eval_precision": 0.6259898141973613,
      "eval_recall": 0.5725429409300378,
      "eval_runtime": 10.1691,
      "eval_samples_per_second": 86.34,
      "eval_steps_per_second": 5.409,
      "step": 5500
    },
    {
      "epoch": 0.72,
      "grad_norm": 4.596500873565674,
      "learning_rate": 4.9279461279461284e-05,
      "loss": 0.4907,
      "step": 6000
    },
    {
      "epoch": 0.72,
      "eval_f1": 0.522356162116029,
      "eval_loss": 0.7966476082801819,
      "eval_precision": 0.6975785731883293,
      "eval_recall": 0.5138146441372248,
      "eval_runtime": 10.1124,
      "eval_samples_per_second": 86.824,
      "eval_steps_per_second": 5.439,
      "step": 6000
    },
    {
      "epoch": 0.78,
      "grad_norm": 4.815864562988281,
      "learning_rate": 4.9219336219336224e-05,
      "loss": 0.493,
      "step": 6500
    },
    {
      "epoch": 0.78,
      "eval_f1": 0.4921730387816156,
      "eval_loss": 0.8192508220672607,
      "eval_precision": 0.7098761086589401,
      "eval_recall": 0.4948889819857562,
      "eval_runtime": 10.12,
      "eval_samples_per_second": 86.759,
      "eval_steps_per_second": 5.435,
      "step": 6500
    },
    {
      "epoch": 0.84,
      "grad_norm": 2.7458622455596924,
      "learning_rate": 4.915921115921116e-05,
      "loss": 0.4668,
      "step": 7000
    },
    {
      "epoch": 0.84,
      "eval_f1": 0.6501053053856183,
      "eval_loss": 0.7501620650291443,
      "eval_precision": 0.6281644378579914,
      "eval_recall": 0.6941954103244425,
      "eval_runtime": 9.7011,
      "eval_samples_per_second": 90.505,
      "eval_steps_per_second": 5.669,
      "step": 7000
    },
    {
      "epoch": 0.9,
      "grad_norm": 6.319346904754639,
      "learning_rate": 4.90990860990861e-05,
      "loss": 0.4717,
      "step": 7500
    },
    {
      "epoch": 0.9,
      "eval_f1": 0.5190561121359437,
      "eval_loss": 0.7636385560035706,
      "eval_precision": 0.6372459240106298,
      "eval_recall": 0.5109072289717451,
      "eval_runtime": 9.9019,
      "eval_samples_per_second": 88.67,
      "eval_steps_per_second": 5.554,
      "step": 7500
    },
    {
      "epoch": 0.96,
      "grad_norm": 4.232970714569092,
      "learning_rate": 4.903896103896104e-05,
      "loss": 0.4774,
      "step": 8000
    },
    {
      "epoch": 0.96,
      "eval_f1": 0.5587125288791309,
      "eval_loss": 0.765232503414154,
      "eval_precision": 0.7513256527294695,
      "eval_recall": 0.5360405902341386,
      "eval_runtime": 10.3268,
      "eval_samples_per_second": 85.022,
      "eval_steps_per_second": 5.326,
      "step": 8000
    },
    {
      "epoch": 1.02,
      "grad_norm": 6.469419479370117,
      "learning_rate": 4.897883597883598e-05,
      "loss": 0.4676,
      "step": 8500
    },
    {
      "epoch": 1.02,
      "eval_f1": 0.5836074412050168,
      "eval_loss": 0.8481860756874084,
      "eval_precision": 0.6371603008082242,
      "eval_recall": 0.5918316808639389,
      "eval_runtime": 10.1025,
      "eval_samples_per_second": 86.909,
      "eval_steps_per_second": 5.444,
      "step": 8500
    },
    {
      "epoch": 1.08,
      "grad_norm": 6.76945161819458,
      "learning_rate": 4.891871091871092e-05,
      "loss": 0.4361,
      "step": 9000
    },
    {
      "epoch": 1.08,
      "eval_f1": 0.5175478304159831,
      "eval_loss": 0.7456216812133789,
      "eval_precision": 0.668652900688299,
      "eval_recall": 0.5176800260671229,
      "eval_runtime": 9.5682,
      "eval_samples_per_second": 91.762,
      "eval_steps_per_second": 5.748,
      "step": 9000
    },
    {
      "epoch": 1.14,
      "grad_norm": 11.62441349029541,
      "learning_rate": 4.885858585858586e-05,
      "loss": 0.4536,
      "step": 9500
    },
    {
      "epoch": 1.14,
      "eval_f1": 0.5155711517804843,
      "eval_loss": 0.8448612689971924,
      "eval_precision": 0.73633642018397,
      "eval_recall": 0.5160256947353722,
      "eval_runtime": 10.0725,
      "eval_samples_per_second": 87.168,
      "eval_steps_per_second": 5.46,
      "step": 9500
    },
    {
      "epoch": 1.2,
      "grad_norm": 6.764497756958008,
      "learning_rate": 4.879858104858105e-05,
      "loss": 0.4277,
      "step": 10000
    },
    {
      "epoch": 1.2,
      "eval_f1": 0.5173349712389866,
      "eval_loss": 0.8647661805152893,
      "eval_precision": 0.6381631231821719,
      "eval_recall": 0.5247414234511009,
      "eval_runtime": 10.0414,
      "eval_samples_per_second": 87.438,
      "eval_steps_per_second": 5.477,
      "step": 10000
    },
    {
      "epoch": 1.26,
      "grad_norm": 6.405815601348877,
      "learning_rate": 4.873845598845599e-05,
      "loss": 0.4444,
      "step": 10500
    },
    {
      "epoch": 1.26,
      "eval_f1": 0.5958880582085585,
      "eval_loss": 0.8722940683364868,
      "eval_precision": 0.5870961573122971,
      "eval_recall": 0.6621933621933621,
      "eval_runtime": 10.0728,
      "eval_samples_per_second": 87.166,
      "eval_steps_per_second": 5.46,
      "step": 10500
    },
    {
      "epoch": 1.32,
      "grad_norm": 3.5875844955444336,
      "learning_rate": 4.8678330928330925e-05,
      "loss": 0.4269,
      "step": 11000
    },
    {
      "epoch": 1.32,
      "eval_f1": 0.5525570018606619,
      "eval_loss": 0.7856002449989319,
      "eval_precision": 0.6150631452676062,
      "eval_recall": 0.5520914211236793,
      "eval_runtime": 9.6968,
      "eval_samples_per_second": 90.545,
      "eval_steps_per_second": 5.672,
      "step": 11000
    },
    {
      "epoch": 1.38,
      "grad_norm": 8.949612617492676,
      "learning_rate": 4.8618205868205866e-05,
      "loss": 0.4322,
      "step": 11500
    },
    {
      "epoch": 1.38,
      "eval_f1": 0.6400986522737416,
      "eval_loss": 0.7404754757881165,
      "eval_precision": 0.6430963017398649,
      "eval_recall": 0.6553870502257598,
      "eval_runtime": 10.249,
      "eval_samples_per_second": 85.667,
      "eval_steps_per_second": 5.366,
      "step": 11500
    },
    {
      "epoch": 1.44,
      "grad_norm": 4.530700206756592,
      "learning_rate": 4.8558080808080806e-05,
      "loss": 0.4435,
      "step": 12000
    },
    {
      "epoch": 1.44,
      "eval_f1": 0.592282401280643,
      "eval_loss": 0.7682257294654846,
      "eval_precision": 0.6568389647238856,
      "eval_recall": 0.5751068286552158,
      "eval_runtime": 9.2229,
      "eval_samples_per_second": 95.197,
      "eval_steps_per_second": 5.963,
      "step": 12000
    },
    {
      "epoch": 1.5,
      "grad_norm": 8.574906349182129,
      "learning_rate": 4.8497955747955746e-05,
      "loss": 0.4429,
      "step": 12500
    },
    {
      "epoch": 1.5,
      "eval_f1": 0.5544893324569983,
      "eval_loss": 0.8823776245117188,
      "eval_precision": 0.595552820840211,
      "eval_recall": 0.6006423683843037,
      "eval_runtime": 9.7179,
      "eval_samples_per_second": 90.349,
      "eval_steps_per_second": 5.66,
      "step": 12500
    },
    {
      "epoch": 1.56,
      "grad_norm": 5.551417827606201,
      "learning_rate": 4.843783068783069e-05,
      "loss": 0.4381,
      "step": 13000
    },
    {
      "epoch": 1.56,
      "eval_f1": 0.4394867511583576,
      "eval_loss": 0.7878994941711426,
      "eval_precision": 0.44570791527313264,
      "eval_recall": 0.4727226178839082,
      "eval_runtime": 9.3935,
      "eval_samples_per_second": 93.469,
      "eval_steps_per_second": 5.855,
      "step": 13000
    },
    {
      "epoch": 1.62,
      "grad_norm": 3.644160032272339,
      "learning_rate": 4.837770562770563e-05,
      "loss": 0.4389,
      "step": 13500
    },
    {
      "epoch": 1.62,
      "eval_f1": 0.6502064155668764,
      "eval_loss": 0.7554711103439331,
      "eval_precision": 0.625995830541285,
      "eval_recall": 0.6983587022296699,
      "eval_runtime": 10.4343,
      "eval_samples_per_second": 84.145,
      "eval_steps_per_second": 5.271,
      "step": 13500
    },
    {
      "epoch": 1.68,
      "grad_norm": 13.295586585998535,
      "learning_rate": 4.8317580567580574e-05,
      "loss": 0.4529,
      "step": 14000
    },
    {
      "epoch": 1.68,
      "eval_f1": 0.5662591977213387,
      "eval_loss": 0.7981444597244263,
      "eval_precision": 0.6620850014647007,
      "eval_recall": 0.5546497230368198,
      "eval_runtime": 10.4044,
      "eval_samples_per_second": 84.388,
      "eval_steps_per_second": 5.286,
      "step": 14000
    },
    {
      "epoch": 1.74,
      "grad_norm": 5.176441192626953,
      "learning_rate": 4.825757575757576e-05,
      "loss": 0.4509,
      "step": 14500
    },
    {
      "epoch": 1.74,
      "eval_f1": 0.6171621860629642,
      "eval_loss": 0.7826597690582275,
      "eval_precision": 0.6159527120477252,
      "eval_recall": 0.6321109714658103,
      "eval_runtime": 10.9264,
      "eval_samples_per_second": 80.356,
      "eval_steps_per_second": 5.034,
      "step": 14500
    },
    {
      "epoch": 1.8,
      "grad_norm": 5.209570407867432,
      "learning_rate": 4.81974506974507e-05,
      "loss": 0.4413,
      "step": 15000
    },
    {
      "epoch": 1.8,
      "eval_f1": 0.6285207961745803,
      "eval_loss": 0.7894985675811768,
      "eval_precision": 0.6381086170916679,
      "eval_recall": 0.6356514453288648,
      "eval_runtime": 10.0334,
      "eval_samples_per_second": 87.508,
      "eval_steps_per_second": 5.482,
      "step": 15000
    },
    {
      "epoch": 1.86,
      "grad_norm": 6.420750141143799,
      "learning_rate": 4.813732563732564e-05,
      "loss": 0.4198,
      "step": 15500
    },
    {
      "epoch": 1.86,
      "eval_f1": 0.5602075791622673,
      "eval_loss": 0.8344631195068359,
      "eval_precision": 0.593987244882881,
      "eval_recall": 0.5526006609877578,
      "eval_runtime": 9.8155,
      "eval_samples_per_second": 89.45,
      "eval_steps_per_second": 5.603,
      "step": 15500
    },
    {
      "epoch": 1.92,
      "grad_norm": 18.093042373657227,
      "learning_rate": 4.807720057720058e-05,
      "loss": 0.4415,
      "step": 16000
    },
    {
      "epoch": 1.92,
      "eval_f1": 0.6458964698548891,
      "eval_loss": 0.8745749592781067,
      "eval_precision": 0.6614986284672897,
      "eval_recall": 0.6612456360843457,
      "eval_runtime": 10.324,
      "eval_samples_per_second": 85.044,
      "eval_steps_per_second": 5.327,
      "step": 16000
    },
    {
      "epoch": 1.98,
      "grad_norm": 3.2640116214752197,
      "learning_rate": 4.801707551707552e-05,
      "loss": 0.443,
      "step": 16500
    },
    {
      "epoch": 1.98,
      "eval_f1": 0.5352104934017916,
      "eval_loss": 0.8155062794685364,
      "eval_precision": 0.6515944452522903,
      "eval_recall": 0.5264720942140296,
      "eval_runtime": 10.2184,
      "eval_samples_per_second": 85.923,
      "eval_steps_per_second": 5.382,
      "step": 16500
    },
    {
      "epoch": 2.04,
      "grad_norm": 6.075052738189697,
      "learning_rate": 4.795695045695046e-05,
      "loss": 0.4068,
      "step": 17000
    },
    {
      "epoch": 2.04,
      "eval_f1": 0.5975450943097163,
      "eval_loss": 0.7641533613204956,
      "eval_precision": 0.5837727558066541,
      "eval_recall": 0.6219876181166503,
      "eval_runtime": 9.7472,
      "eval_samples_per_second": 90.077,
      "eval_steps_per_second": 5.643,
      "step": 17000
    },
    {
      "epoch": 2.1,
      "grad_norm": 2.704364538192749,
      "learning_rate": 4.78968253968254e-05,
      "loss": 0.3905,
      "step": 17500
    },
    {
      "epoch": 2.1,
      "eval_f1": 0.573958551424587,
      "eval_loss": 0.7928948998451233,
      "eval_precision": 0.6719886309250943,
      "eval_recall": 0.5555350742447516,
      "eval_runtime": 9.8882,
      "eval_samples_per_second": 88.792,
      "eval_steps_per_second": 5.562,
      "step": 17500
    },
    {
      "epoch": 2.16,
      "grad_norm": 2.702869176864624,
      "learning_rate": 4.783670033670034e-05,
      "loss": 0.3969,
      "step": 18000
    },
    {
      "epoch": 2.16,
      "eval_f1": 0.46873679220558384,
      "eval_loss": 0.8949081897735596,
      "eval_precision": 0.5330288849723479,
      "eval_recall": 0.4771326164874552,
      "eval_runtime": 9.7502,
      "eval_samples_per_second": 90.05,
      "eval_steps_per_second": 5.641,
      "step": 18000
    },
    {
      "epoch": 2.22,
      "grad_norm": 2.651677370071411,
      "learning_rate": 4.777657527657528e-05,
      "loss": 0.3841,
      "step": 18500
    },
    {
      "epoch": 2.22,
      "eval_f1": 0.5492398227556807,
      "eval_loss": 0.9232780933380127,
      "eval_precision": 0.6027874564459931,
      "eval_recall": 0.5410119629474468,
      "eval_runtime": 9.8226,
      "eval_samples_per_second": 89.386,
      "eval_steps_per_second": 5.599,
      "step": 18500
    },
    {
      "epoch": 2.28,
      "grad_norm": 2.9351656436920166,
      "learning_rate": 4.771645021645022e-05,
      "loss": 0.4031,
      "step": 19000
    },
    {
      "epoch": 2.28,
      "eval_f1": 0.5776004425555427,
      "eval_loss": 0.7720491290092468,
      "eval_precision": 0.6088870316942702,
      "eval_recall": 0.5719396732299958,
      "eval_runtime": 9.6849,
      "eval_samples_per_second": 90.656,
      "eval_steps_per_second": 5.679,
      "step": 19000
    },
    {
      "epoch": 2.34,
      "grad_norm": 10.01524829864502,
      "learning_rate": 4.765632515632516e-05,
      "loss": 0.3878,
      "step": 19500
    },
    {
      "epoch": 2.34,
      "eval_f1": 0.5317940446482866,
      "eval_loss": 0.9046021103858948,
      "eval_precision": 0.6265449365449366,
      "eval_recall": 0.5357957454731649,
      "eval_runtime": 9.4111,
      "eval_samples_per_second": 93.294,
      "eval_steps_per_second": 5.844,
      "step": 19500
    },
    {
      "epoch": 2.41,
      "grad_norm": 1.4061155319213867,
      "learning_rate": 4.75962000962001e-05,
      "loss": 0.4001,
      "step": 20000
    },
    {
      "epoch": 2.41,
      "eval_f1": 0.5760553799542051,
      "eval_loss": 0.8451133370399475,
      "eval_precision": 0.6959572062018271,
      "eval_recall": 0.562243634501699,
      "eval_runtime": 9.8527,
      "eval_samples_per_second": 89.112,
      "eval_steps_per_second": 5.582,
      "step": 20000
    },
    {
      "epoch": 2.47,
      "grad_norm": 2.654297113418579,
      "learning_rate": 4.753607503607504e-05,
      "loss": 0.3997,
      "step": 20500
    },
    {
      "epoch": 2.47,
      "eval_f1": 0.5540787433460292,
      "eval_loss": 0.8963695168495178,
      "eval_precision": 0.6169583136571495,
      "eval_recall": 0.5665354000837871,
      "eval_runtime": 9.8314,
      "eval_samples_per_second": 89.306,
      "eval_steps_per_second": 5.594,
      "step": 20500
    },
    {
      "epoch": 2.53,
      "grad_norm": 2.318394184112549,
      "learning_rate": 4.747607022607023e-05,
      "loss": 0.3945,
      "step": 21000
    },
    {
      "epoch": 2.53,
      "eval_f1": 0.5195041284097078,
      "eval_loss": 0.8000777363777161,
      "eval_precision": 0.5553255357437211,
      "eval_recall": 0.5180002792906019,
      "eval_runtime": 10.1975,
      "eval_samples_per_second": 86.099,
      "eval_steps_per_second": 5.393,
      "step": 21000
    },
    {
      "epoch": 2.59,
      "grad_norm": 6.022315502166748,
      "learning_rate": 4.741594516594517e-05,
      "loss": 0.4005,
      "step": 21500
    },
    {
      "epoch": 2.59,
      "eval_f1": 0.5169696711549415,
      "eval_loss": 0.8356830477714539,
      "eval_precision": 0.5518686564828851,
      "eval_recall": 0.5099939487036261,
      "eval_runtime": 9.9411,
      "eval_samples_per_second": 88.32,
      "eval_steps_per_second": 5.533,
      "step": 21500
    },
    {
      "epoch": 2.65,
      "grad_norm": 3.7972123622894287,
      "learning_rate": 4.735582010582011e-05,
      "loss": 0.3907,
      "step": 22000
    },
    {
      "epoch": 2.65,
      "eval_f1": 0.5552096784695296,
      "eval_loss": 0.8016623258590698,
      "eval_precision": 0.5884397258734974,
      "eval_recall": 0.5408509053670344,
      "eval_runtime": 9.5287,
      "eval_samples_per_second": 92.142,
      "eval_steps_per_second": 5.772,
      "step": 22000
    },
    {
      "epoch": 2.71,
      "grad_norm": 6.792105674743652,
      "learning_rate": 4.729569504569505e-05,
      "loss": 0.3858,
      "step": 22500
    },
    {
      "epoch": 2.71,
      "eval_f1": 0.5861875804431639,
      "eval_loss": 0.8283036351203918,
      "eval_precision": 0.6035754454158798,
      "eval_recall": 0.5791807475678443,
      "eval_runtime": 9.9189,
      "eval_samples_per_second": 88.518,
      "eval_steps_per_second": 5.545,
      "step": 22500
    },
    {
      "epoch": 2.77,
      "grad_norm": 1.6373685598373413,
      "learning_rate": 4.723556998556999e-05,
      "loss": 0.3973,
      "step": 23000
    },
    {
      "epoch": 2.77,
      "eval_f1": 0.5393179472998001,
      "eval_loss": 0.9024349451065063,
      "eval_precision": 0.5769620560542147,
      "eval_recall": 0.5664646464646464,
      "eval_runtime": 9.6142,
      "eval_samples_per_second": 91.324,
      "eval_steps_per_second": 5.721,
      "step": 23000
    },
    {
      "epoch": 2.83,
      "grad_norm": 2.8107876777648926,
      "learning_rate": 4.717544492544493e-05,
      "loss": 0.3969,
      "step": 23500
    },
    {
      "epoch": 2.83,
      "eval_f1": 0.5558204883761618,
      "eval_loss": 0.8340879678726196,
      "eval_precision": 0.564202659920544,
      "eval_recall": 0.5527868547223386,
      "eval_runtime": 9.7568,
      "eval_samples_per_second": 89.988,
      "eval_steps_per_second": 5.637,
      "step": 23500
    },
    {
      "epoch": 2.89,
      "grad_norm": 3.1326379776000977,
      "learning_rate": 4.7115319865319865e-05,
      "loss": 0.3911,
      "step": 24000
    },
    {
      "epoch": 2.89,
      "eval_f1": 0.5070306794950141,
      "eval_loss": 0.8966326117515564,
      "eval_precision": 0.6045140236228393,
      "eval_recall": 0.5087874133035424,
      "eval_runtime": 140.3321,
      "eval_samples_per_second": 6.257,
      "eval_steps_per_second": 0.392,
      "step": 24000
    },
    {
      "epoch": 2.95,
      "grad_norm": 4.015880107879639,
      "learning_rate": 4.7055194805194805e-05,
      "loss": 0.3856,
      "step": 24500
    },
    {
      "epoch": 2.95,
      "eval_f1": 0.5688512961191111,
      "eval_loss": 0.8348873853683472,
      "eval_precision": 0.6021456496959962,
      "eval_recall": 0.5586072708653353,
      "eval_runtime": 77.3964,
      "eval_samples_per_second": 11.344,
      "eval_steps_per_second": 0.711,
      "step": 24500
    },
    {
      "epoch": 3.01,
      "grad_norm": 10.381326675415039,
      "learning_rate": 4.699518999519e-05,
      "loss": 0.3961,
      "step": 25000
    },
    {
      "epoch": 3.01,
      "eval_f1": 0.558529842992594,
      "eval_loss": 0.936424195766449,
      "eval_precision": 0.6119355440337201,
      "eval_recall": 0.5411525392170553,
      "eval_runtime": 9.7898,
      "eval_samples_per_second": 89.685,
      "eval_steps_per_second": 5.618,
      "step": 25000
    },
    {
      "epoch": 3.07,
      "grad_norm": 4.446761608123779,
      "learning_rate": 4.693506493506494e-05,
      "loss": 0.3301,
      "step": 25500
    },
    {
      "epoch": 3.07,
      "eval_f1": 0.5812822447385155,
      "eval_loss": 0.9541723728179932,
      "eval_precision": 0.5757327569674934,
      "eval_recall": 0.6084206116464181,
      "eval_runtime": 9.902,
      "eval_samples_per_second": 88.669,
      "eval_steps_per_second": 5.554,
      "step": 25500
    },
    {
      "epoch": 3.13,
      "grad_norm": 4.501039981842041,
      "learning_rate": 4.687493987493988e-05,
      "loss": 0.3385,
      "step": 26000
    },
    {
      "epoch": 3.13,
      "eval_f1": 0.5345940260961672,
      "eval_loss": 1.0137349367141724,
      "eval_precision": 0.5563381916292068,
      "eval_recall": 0.5293767164734907,
      "eval_runtime": 10.1055,
      "eval_samples_per_second": 86.883,
      "eval_steps_per_second": 5.443,
      "step": 26000
    },
    {
      "epoch": 3.19,
      "grad_norm": 1.9400558471679688,
      "learning_rate": 4.681481481481482e-05,
      "loss": 0.3475,
      "step": 26500
    },
    {
      "epoch": 3.19,
      "eval_f1": 0.5822160079727499,
      "eval_loss": 0.9310874938964844,
      "eval_precision": 0.6358738260592424,
      "eval_recall": 0.5675203649397198,
      "eval_runtime": 9.9305,
      "eval_samples_per_second": 88.414,
      "eval_steps_per_second": 5.538,
      "step": 26500
    },
    {
      "epoch": 3.19,
      "step": 26500,
      "total_flos": 0.0,
      "train_loss": 0.44209005161501325,
      "train_runtime": 9420.7211,
      "train_samples_per_second": 706.135,
      "train_steps_per_second": 44.137
    }
  ],
  "logging_steps": 500,
  "max_steps": 415800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}