{
  "best_metric": 0.18451988697052002,
  "best_model_checkpoint": "./models/results_one_liners_693/checkpoint-325",
  "epoch": 0.9374436632413917,
  "global_step": 325,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.6947,
      "step": 8
    },
    {
      "epoch": 0.05,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.6932,
      "step": 16
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.2e-05,
      "loss": 0.6919,
      "step": 24
    },
    {
      "epoch": 0.07,
      "eval_accuracy": 0.6413249211356467,
      "eval_f1": 0.7321554770318023,
      "eval_loss": 0.6814908981323242,
      "eval_precision": 0.5779100037188546,
      "eval_recall": 0.9987146529562982,
      "eval_runtime": 21.2769,
      "eval_samples_per_second": 148.988,
      "eval_steps_per_second": 18.659,
      "step": 25
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.673,
      "step": 32
    },
    {
      "epoch": 0.12,
      "learning_rate": 2e-05,
      "loss": 0.6191,
      "step": 40
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.4e-05,
      "loss": 0.4553,
      "step": 48
    },
    {
      "epoch": 0.14,
      "eval_accuracy": 0.8596214511041009,
      "eval_f1": 0.8499156829679595,
      "eval_loss": 0.34521493315696716,
      "eval_precision": 0.8942512420156139,
      "eval_recall": 0.8097686375321337,
      "eval_runtime": 20.9603,
      "eval_samples_per_second": 151.238,
      "eval_steps_per_second": 18.941,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 0.344,
      "step": 56
    },
    {
      "epoch": 0.18,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.3474,
      "step": 64
    },
    {
      "epoch": 0.21,
      "learning_rate": 3.6e-05,
      "loss": 0.363,
      "step": 72
    },
    {
      "epoch": 0.22,
      "eval_accuracy": 0.8933753943217666,
      "eval_f1": 0.8901884340480831,
      "eval_loss": 0.25991594791412354,
      "eval_precision": 0.900131406044678,
      "eval_recall": 0.8804627249357326,
      "eval_runtime": 20.8982,
      "eval_samples_per_second": 151.688,
      "eval_steps_per_second": 18.997,
      "step": 75
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.9500000000000005e-05,
      "loss": 0.3698,
      "step": 80
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.35e-05,
      "loss": 0.2855,
      "step": 88
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.75e-05,
      "loss": 0.3564,
      "step": 96
    },
    {
      "epoch": 0.29,
      "eval_accuracy": 0.8902208201892744,
      "eval_f1": 0.8932515337423313,
      "eval_loss": 0.2616112232208252,
      "eval_precision": 0.8544600938967136,
      "eval_recall": 0.9357326478149101,
      "eval_runtime": 20.9191,
      "eval_samples_per_second": 151.536,
      "eval_steps_per_second": 18.978,
      "step": 100
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.9390243902439024e-05,
      "loss": 0.271,
      "step": 104
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.776422764227643e-05,
      "loss": 0.3519,
      "step": 112
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.613821138211382e-05,
      "loss": 0.3108,
      "step": 120
    },
    {
      "epoch": 0.36,
      "eval_accuracy": 0.8943217665615142,
      "eval_f1": 0.8949513954217623,
      "eval_loss": 0.2482648640871048,
      "eval_precision": 0.8738518064911206,
      "eval_recall": 0.9170951156812339,
      "eval_runtime": 20.9132,
      "eval_samples_per_second": 151.579,
      "eval_steps_per_second": 18.983,
      "step": 125
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.451219512195122e-05,
      "loss": 0.2794,
      "step": 128
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.2886178861788616e-05,
      "loss": 0.3446,
      "step": 136
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.146341463414634e-05,
      "loss": 0.2649,
      "step": 144
    },
    {
      "epoch": 0.43,
      "eval_accuracy": 0.9123028391167193,
      "eval_f1": 0.9100905562742561,
      "eval_loss": 0.22195443511009216,
      "eval_precision": 0.916015625,
      "eval_recall": 0.9042416452442159,
      "eval_runtime": 20.8531,
      "eval_samples_per_second": 152.016,
      "eval_steps_per_second": 19.038,
      "step": 150
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.983739837398374e-05,
      "loss": 0.2772,
      "step": 152
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.8211382113821145e-05,
      "loss": 0.2738,
      "step": 160
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.6585365853658535e-05,
      "loss": 0.3146,
      "step": 168
    },
    {
      "epoch": 0.5,
      "eval_accuracy": 0.9082018927444795,
      "eval_f1": 0.9041817583141256,
      "eval_loss": 0.22258049249649048,
      "eval_precision": 0.9270762997974341,
      "eval_recall": 0.8823907455012854,
      "eval_runtime": 20.9525,
      "eval_samples_per_second": 151.295,
      "eval_steps_per_second": 18.948,
      "step": 175
    },
    {
      "epoch": 0.51,
      "learning_rate": 3.495934959349594e-05,
      "loss": 0.2606,
      "step": 176
    },
    {
      "epoch": 0.53,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.1921,
      "step": 184
    },
    {
      "epoch": 0.55,
      "learning_rate": 3.170731707317073e-05,
      "loss": 0.1973,
      "step": 192
    },
    {
      "epoch": 0.58,
      "learning_rate": 3.0081300813008135e-05,
      "loss": 0.2459,
      "step": 200
    },
    {
      "epoch": 0.58,
      "eval_accuracy": 0.8917981072555206,
      "eval_f1": 0.8987304399173309,
      "eval_loss": 0.27942967414855957,
      "eval_precision": 0.8312397596941562,
      "eval_recall": 0.9781491002570694,
      "eval_runtime": 21.1312,
      "eval_samples_per_second": 150.015,
      "eval_steps_per_second": 18.787,
      "step": 200
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.8455284552845528e-05,
      "loss": 0.2524,
      "step": 208
    },
    {
      "epoch": 0.62,
      "learning_rate": 2.682926829268293e-05,
      "loss": 0.2249,
      "step": 216
    },
    {
      "epoch": 0.65,
      "learning_rate": 2.5203252032520324e-05,
      "loss": 0.286,
      "step": 224
    },
    {
      "epoch": 0.65,
      "eval_accuracy": 0.9261829652996846,
      "eval_f1": 0.9265536723163842,
      "eval_loss": 0.20206698775291443,
      "eval_precision": 0.905521472392638,
      "eval_recall": 0.9485861182519281,
      "eval_runtime": 21.0759,
      "eval_samples_per_second": 150.409,
      "eval_steps_per_second": 18.837,
      "step": 225
    },
    {
      "epoch": 0.67,
      "learning_rate": 2.3577235772357724e-05,
      "loss": 0.2469,
      "step": 232
    },
    {
      "epoch": 0.69,
      "learning_rate": 2.1951219512195124e-05,
      "loss": 0.2506,
      "step": 240
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.032520325203252e-05,
      "loss": 0.1756,
      "step": 248
    },
    {
      "epoch": 0.72,
      "eval_accuracy": 0.9233438485804416,
      "eval_f1": 0.9235127478753541,
      "eval_loss": 0.20944611728191376,
      "eval_precision": 0.904996915484269,
      "eval_recall": 0.9428020565552699,
      "eval_runtime": 21.6836,
      "eval_samples_per_second": 146.193,
      "eval_steps_per_second": 18.309,
      "step": 250
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.869918699186992e-05,
      "loss": 0.2227,
      "step": 256
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.707317073170732e-05,
      "loss": 0.2191,
      "step": 264
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.5447154471544717e-05,
      "loss": 0.1611,
      "step": 272
    },
    {
      "epoch": 0.79,
      "eval_accuracy": 0.9280757097791799,
      "eval_f1": 0.9263089851325145,
      "eval_loss": 0.19648678600788116,
      "eval_precision": 0.9317295188556567,
      "eval_recall": 0.9209511568123393,
      "eval_runtime": 21.0389,
      "eval_samples_per_second": 150.673,
      "eval_steps_per_second": 18.87,
      "step": 275
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.3821138211382115e-05,
      "loss": 0.1854,
      "step": 280
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 0.1772,
      "step": 288
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.0569105691056911e-05,
      "loss": 0.2562,
      "step": 296
    },
    {
      "epoch": 0.87,
      "eval_accuracy": 0.9236593059936908,
      "eval_f1": 0.9252625077208154,
      "eval_loss": 0.20132240653038025,
      "eval_precision": 0.8906064209274673,
      "eval_recall": 0.9627249357326478,
      "eval_runtime": 21.0921,
      "eval_samples_per_second": 150.293,
      "eval_steps_per_second": 18.822,
      "step": 300
    },
    {
      "epoch": 0.88,
      "learning_rate": 8.94308943089431e-06,
      "loss": 0.2303,
      "step": 304
    },
    {
      "epoch": 0.9,
      "learning_rate": 7.317073170731707e-06,
      "loss": 0.2242,
      "step": 312
    },
    {
      "epoch": 0.92,
      "learning_rate": 5.6910569105691056e-06,
      "loss": 0.1955,
      "step": 320
    },
    {
      "epoch": 0.94,
      "eval_accuracy": 0.931230283911672,
      "eval_f1": 0.9316185696361355,
      "eval_loss": 0.18451988697052002,
      "eval_precision": 0.9099264705882353,
      "eval_recall": 0.9543701799485861,
      "eval_runtime": 20.9834,
      "eval_samples_per_second": 151.072,
      "eval_steps_per_second": 18.92,
      "step": 325
    }
  ],
  "max_steps": 346,
  "num_train_epochs": 1,
  "total_flos": 619955424192000.0,
  "trial_name": null,
  "trial_params": null
}