{
  "best_metric": 0.6402985074626866,
  "best_model_checkpoint": "logs/output_1/checkpoint-22500",
  "epoch": 20.0,
  "global_step": 30000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33,
      "learning_rate": 9.833333333333333e-05,
      "loss": 0.2804,
      "step": 500
    },
    {
      "epoch": 0.67,
      "learning_rate": 9.666666666666667e-05,
      "loss": 0.2126,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "learning_rate": 9.5e-05,
      "loss": 0.2064,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.2440783679485321,
      "eval_macro-f1": 0.3285179361212921,
      "eval_micro-f1": 0.5178640030733768,
      "eval_runtime": 38.4141,
      "eval_samples_per_second": 26.032,
      "eval_steps_per_second": 4.347,
      "step": 1500
    },
    {
      "epoch": 1.33,
      "learning_rate": 9.333333333333334e-05,
      "loss": 0.1931,
      "step": 2000
    },
    {
      "epoch": 1.67,
      "learning_rate": 9.166666666666667e-05,
      "loss": 0.1909,
      "step": 2500
    },
    {
      "epoch": 2.0,
      "learning_rate": 9e-05,
      "loss": 0.1915,
      "step": 3000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.22793951630592346,
      "eval_macro-f1": 0.4252441757993691,
      "eval_micro-f1": 0.5836780744640844,
      "eval_runtime": 38.4078,
      "eval_samples_per_second": 26.036,
      "eval_steps_per_second": 4.348,
      "step": 3000
    },
    {
      "epoch": 2.33,
      "learning_rate": 8.833333333333333e-05,
      "loss": 0.1811,
      "step": 3500
    },
    {
      "epoch": 2.67,
      "learning_rate": 8.666666666666667e-05,
      "loss": 0.1858,
      "step": 4000
    },
    {
      "epoch": 3.0,
      "learning_rate": 8.5e-05,
      "loss": 0.1803,
      "step": 4500
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.22564423084259033,
      "eval_macro-f1": 0.41935544051295165,
      "eval_micro-f1": 0.5869565217391305,
      "eval_runtime": 38.443,
      "eval_samples_per_second": 26.013,
      "eval_steps_per_second": 4.344,
      "step": 4500
    },
    {
      "epoch": 3.33,
      "learning_rate": 8.333333333333334e-05,
      "loss": 0.1754,
      "step": 5000
    },
    {
      "epoch": 3.67,
      "learning_rate": 8.166666666666667e-05,
      "loss": 0.1814,
      "step": 5500
    },
    {
      "epoch": 4.0,
      "learning_rate": 8e-05,
      "loss": 0.1799,
      "step": 6000
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.22058944404125214,
      "eval_macro-f1": 0.4256290673151095,
      "eval_micro-f1": 0.583710407239819,
      "eval_runtime": 38.4982,
      "eval_samples_per_second": 25.975,
      "eval_steps_per_second": 4.338,
      "step": 6000
    },
    {
      "epoch": 4.33,
      "learning_rate": 7.833333333333333e-05,
      "loss": 0.1723,
      "step": 6500
    },
    {
      "epoch": 4.67,
      "learning_rate": 7.666666666666667e-05,
      "loss": 0.1721,
      "step": 7000
    },
    {
      "epoch": 5.0,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.1781,
      "step": 7500
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.21936117112636566,
      "eval_macro-f1": 0.4544580199122185,
      "eval_micro-f1": 0.6049795615013006,
      "eval_runtime": 38.3907,
      "eval_samples_per_second": 26.048,
      "eval_steps_per_second": 4.35,
      "step": 7500
    },
    {
      "epoch": 5.33,
      "learning_rate": 7.333333333333333e-05,
      "loss": 0.1702,
      "step": 8000
    },
    {
      "epoch": 5.67,
      "learning_rate": 7.166666666666667e-05,
      "loss": 0.1735,
      "step": 8500
    },
    {
      "epoch": 6.0,
      "learning_rate": 7e-05,
      "loss": 0.1735,
      "step": 9000
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.213190495967865,
      "eval_macro-f1": 0.5074201298504828,
      "eval_micro-f1": 0.6119235095613048,
      "eval_runtime": 38.4518,
      "eval_samples_per_second": 26.007,
      "eval_steps_per_second": 4.343,
      "step": 9000
    },
    {
      "epoch": 6.33,
      "learning_rate": 6.833333333333333e-05,
      "loss": 0.1703,
      "step": 9500
    },
    {
      "epoch": 6.67,
      "learning_rate": 6.667e-05,
      "loss": 0.167,
      "step": 10000
    },
    {
      "epoch": 7.0,
      "learning_rate": 6.500333333333333e-05,
      "loss": 0.1751,
      "step": 10500
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.21256442368030548,
      "eval_macro-f1": 0.5106462817956516,
      "eval_micro-f1": 0.6118789689951437,
      "eval_runtime": 38.4476,
      "eval_samples_per_second": 26.009,
      "eval_steps_per_second": 4.344,
      "step": 10500
    },
    {
      "epoch": 7.33,
      "learning_rate": 6.333666666666667e-05,
      "loss": 0.1645,
      "step": 11000
    },
    {
      "epoch": 7.67,
      "learning_rate": 6.167000000000001e-05,
      "loss": 0.1728,
      "step": 11500
    },
    {
      "epoch": 8.0,
      "learning_rate": 6.000666666666667e-05,
      "loss": 0.1684,
      "step": 12000
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.21494406461715698,
      "eval_macro-f1": 0.5146093696611631,
      "eval_micro-f1": 0.6274074074074073,
      "eval_runtime": 38.4777,
      "eval_samples_per_second": 25.989,
      "eval_steps_per_second": 4.34,
      "step": 12000
    },
    {
      "epoch": 8.33,
      "learning_rate": 5.834000000000001e-05,
      "loss": 0.1712,
      "step": 12500
    },
    {
      "epoch": 8.67,
      "learning_rate": 5.667333333333333e-05,
      "loss": 0.1631,
      "step": 13000
    },
    {
      "epoch": 9.0,
      "learning_rate": 5.500666666666667e-05,
      "loss": 0.1678,
      "step": 13500
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.21165955066680908,
      "eval_macro-f1": 0.4989095080248951,
      "eval_micro-f1": 0.6196341918626352,
      "eval_runtime": 38.4068,
      "eval_samples_per_second": 26.037,
      "eval_steps_per_second": 4.348,
      "step": 13500
    },
    {
      "epoch": 9.33,
      "learning_rate": 5.334e-05,
      "loss": 0.1665,
      "step": 14000
    },
    {
      "epoch": 9.67,
      "learning_rate": 5.167333333333334e-05,
      "loss": 0.1654,
      "step": 14500
    },
    {
      "epoch": 10.0,
      "learning_rate": 5.000666666666667e-05,
      "loss": 0.1654,
      "step": 15000
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.21151196956634521,
      "eval_macro-f1": 0.5109214598543,
      "eval_micro-f1": 0.6257943925233643,
      "eval_runtime": 38.4501,
      "eval_samples_per_second": 26.008,
      "eval_steps_per_second": 4.343,
      "step": 15000
    },
    {
      "epoch": 10.33,
      "learning_rate": 4.834e-05,
      "loss": 0.164,
      "step": 15500
    },
    {
      "epoch": 10.67,
      "learning_rate": 4.6676666666666666e-05,
      "loss": 0.1638,
      "step": 16000
    },
    {
      "epoch": 11.0,
      "learning_rate": 4.5010000000000004e-05,
      "loss": 0.1657,
      "step": 16500
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.2080332338809967,
      "eval_macro-f1": 0.5133127262093694,
      "eval_micro-f1": 0.6190654205607478,
      "eval_runtime": 38.4183,
      "eval_samples_per_second": 26.029,
      "eval_steps_per_second": 4.347,
      "step": 16500
    },
    {
      "epoch": 11.33,
      "learning_rate": 4.3343333333333336e-05,
      "loss": 0.1616,
      "step": 17000
    },
    {
      "epoch": 11.67,
      "learning_rate": 4.167666666666667e-05,
      "loss": 0.1666,
      "step": 17500
    },
    {
      "epoch": 12.0,
      "learning_rate": 4.001333333333334e-05,
      "loss": 0.1611,
      "step": 18000
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.2084576040506363,
      "eval_macro-f1": 0.5106090655093949,
      "eval_micro-f1": 0.6291635825314581,
      "eval_runtime": 38.4878,
      "eval_samples_per_second": 25.982,
      "eval_steps_per_second": 4.339,
      "step": 18000
    },
    {
      "epoch": 12.33,
      "learning_rate": 3.8350000000000004e-05,
      "loss": 0.1613,
      "step": 18500
    },
    {
      "epoch": 12.67,
      "learning_rate": 3.6683333333333335e-05,
      "loss": 0.1649,
      "step": 19000
    },
    {
      "epoch": 13.0,
      "learning_rate": 3.501666666666667e-05,
      "loss": 0.1609,
      "step": 19500
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.21025387942790985,
      "eval_macro-f1": 0.5193025997190498,
      "eval_micro-f1": 0.6298629121896998,
      "eval_runtime": 38.3811,
      "eval_samples_per_second": 26.054,
      "eval_steps_per_second": 4.351,
      "step": 19500
    },
    {
      "epoch": 13.33,
      "learning_rate": 3.3350000000000004e-05,
      "loss": 0.1609,
      "step": 20000
    },
    {
      "epoch": 13.67,
      "learning_rate": 3.1683333333333335e-05,
      "loss": 0.162,
      "step": 20500
    },
    {
      "epoch": 14.0,
      "learning_rate": 3.001666666666667e-05,
      "loss": 0.1637,
      "step": 21000
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.20812740921974182,
      "eval_macro-f1": 0.5376978183350004,
      "eval_micro-f1": 0.6320895522388059,
      "eval_runtime": 38.4982,
      "eval_samples_per_second": 25.975,
      "eval_steps_per_second": 4.338,
      "step": 21000
    },
    {
      "epoch": 14.33,
      "learning_rate": 2.8349999999999998e-05,
      "loss": 0.1594,
      "step": 21500
    },
    {
      "epoch": 14.67,
      "learning_rate": 2.6683333333333333e-05,
      "loss": 0.1597,
      "step": 22000
    },
    {
      "epoch": 15.0,
      "learning_rate": 2.5019999999999998e-05,
      "loss": 0.1639,
      "step": 22500
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.20513254404067993,
      "eval_macro-f1": 0.5308672693299893,
      "eval_micro-f1": 0.6402985074626866,
      "eval_runtime": 38.4819,
      "eval_samples_per_second": 25.986,
      "eval_steps_per_second": 4.34,
      "step": 22500
    },
    {
      "epoch": 15.33,
      "learning_rate": 2.335666666666667e-05,
      "loss": 0.1549,
      "step": 23000
    },
    {
      "epoch": 15.67,
      "learning_rate": 2.169e-05,
      "loss": 0.1625,
      "step": 23500
    },
    {
      "epoch": 16.0,
      "learning_rate": 2.0023333333333335e-05,
      "loss": 0.1629,
      "step": 24000
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.2067381590604782,
      "eval_macro-f1": 0.5235026961639551,
      "eval_micro-f1": 0.6344005956813105,
      "eval_runtime": 38.3766,
      "eval_samples_per_second": 26.058,
      "eval_steps_per_second": 4.352,
      "step": 24000
    },
    {
      "epoch": 16.33,
      "learning_rate": 1.8356666666666667e-05,
      "loss": 0.1594,
      "step": 24500
    },
    {
      "epoch": 16.67,
      "learning_rate": 1.669e-05,
      "loss": 0.1623,
      "step": 25000
    },
    {
      "epoch": 17.0,
      "learning_rate": 1.5023333333333334e-05,
      "loss": 0.1555,
      "step": 25500
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.20664575695991516,
      "eval_macro-f1": 0.5278087298915135,
      "eval_micro-f1": 0.632286995515695,
      "eval_runtime": 38.476,
      "eval_samples_per_second": 25.99,
      "eval_steps_per_second": 4.34,
      "step": 25500
    },
    {
      "epoch": 17.33,
      "learning_rate": 1.336e-05,
      "loss": 0.1551,
      "step": 26000
    },
    {
      "epoch": 17.67,
      "learning_rate": 1.1693333333333334e-05,
      "loss": 0.1633,
      "step": 26500
    },
    {
      "epoch": 18.0,
      "learning_rate": 1.0026666666666668e-05,
      "loss": 0.1609,
      "step": 27000
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.20542995631694794,
      "eval_macro-f1": 0.5294287867927951,
      "eval_micro-f1": 0.6334706102583302,
      "eval_runtime": 38.3677,
      "eval_samples_per_second": 26.064,
      "eval_steps_per_second": 4.353,
      "step": 27000
    },
    {
      "epoch": 18.33,
      "learning_rate": 8.36e-06,
      "loss": 0.1537,
      "step": 27500
    },
    {
      "epoch": 18.67,
      "learning_rate": 6.693333333333333e-06,
      "loss": 0.1585,
      "step": 28000
    },
    {
      "epoch": 19.0,
      "learning_rate": 5.03e-06,
      "loss": 0.165,
      "step": 28500
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.20603427290916443,
      "eval_macro-f1": 0.5366368871906745,
      "eval_micro-f1": 0.6378861183475996,
      "eval_runtime": 38.431,
      "eval_samples_per_second": 26.021,
      "eval_steps_per_second": 4.345,
      "step": 28500
    },
    {
      "epoch": 19.33,
      "learning_rate": 3.3633333333333335e-06,
      "loss": 0.1605,
      "step": 29000
    },
    {
      "epoch": 19.67,
      "learning_rate": 1.6966666666666668e-06,
      "loss": 0.1637,
      "step": 29500
    },
    {
      "epoch": 20.0,
      "learning_rate": 3e-08,
      "loss": 0.15,
      "step": 30000
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.2060048133134842,
      "eval_macro-f1": 0.5353270271604428,
      "eval_micro-f1": 0.6362957430918595,
      "eval_runtime": 38.3799,
      "eval_samples_per_second": 26.055,
      "eval_steps_per_second": 4.351,
      "step": 30000
    },
    {
      "epoch": 20.0,
      "step": 30000,
      "total_flos": 1.0577426743296e+17,
      "train_loss": 0.17072735188802082,
      "train_runtime": 8560.8109,
      "train_samples_per_second": 21.026,
      "train_steps_per_second": 3.504
    }
  ],
  "max_steps": 30000,
  "num_train_epochs": 20,
  "total_flos": 1.0577426743296e+17,
  "trial_name": null,
  "trial_params": null
}