{
  "best_metric": 0.9186971783638,
  "best_model_checkpoint": "./Finetuned_Classification_model_200k/checkpoint-500",
  "epoch": 0.16,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1e-05,
      "loss": 1.037,
      "step": 10
    },
    {
      "epoch": 0.0,
      "learning_rate": 2e-05,
      "loss": 0.907,
      "step": 20
    },
    {
      "epoch": 0.0,
      "learning_rate": 3e-05,
      "loss": 0.7159,
      "step": 30
    },
    {
      "epoch": 0.01,
      "learning_rate": 4e-05,
      "loss": 0.5758,
      "step": 40
    },
    {
      "epoch": 0.01,
      "learning_rate": 5e-05,
      "loss": 0.3454,
      "step": 50
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.997326203208557e-05,
      "loss": 0.1851,
      "step": 60
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.994652406417113e-05,
      "loss": 0.2821,
      "step": 70
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.9919786096256686e-05,
      "loss": 0.1741,
      "step": 80
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.9893048128342245e-05,
      "loss": 0.1749,
      "step": 90
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.986631016042781e-05,
      "loss": 0.2407,
      "step": 100
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.983957219251337e-05,
      "loss": 0.2612,
      "step": 110
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.981283422459893e-05,
      "loss": 0.1677,
      "step": 120
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.9786096256684495e-05,
      "loss": 0.1123,
      "step": 130
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.975935828877006e-05,
      "loss": 0.1368,
      "step": 140
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.973262032085561e-05,
      "loss": 0.1449,
      "step": 150
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.970588235294118e-05,
      "loss": 0.152,
      "step": 160
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.967914438502674e-05,
      "loss": 0.2504,
      "step": 170
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.9652406417112304e-05,
      "loss": 0.2578,
      "step": 180
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.962566844919786e-05,
      "loss": 0.1289,
      "step": 190
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.959893048128342e-05,
      "loss": 0.1263,
      "step": 200
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.957219251336899e-05,
      "loss": 0.1031,
      "step": 210
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9545454545454553e-05,
      "loss": 0.2453,
      "step": 220
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9518716577540106e-05,
      "loss": 0.0675,
      "step": 230
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.949197860962567e-05,
      "loss": 0.1807,
      "step": 240
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.946524064171123e-05,
      "loss": 0.1493,
      "step": 250
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9438502673796796e-05,
      "loss": 0.109,
      "step": 260
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9411764705882355e-05,
      "loss": 0.2148,
      "step": 270
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9385026737967914e-05,
      "loss": 0.2574,
      "step": 280
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.935828877005348e-05,
      "loss": 0.2198,
      "step": 290
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.933155080213904e-05,
      "loss": 0.1574,
      "step": 300
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.93048128342246e-05,
      "loss": 0.0866,
      "step": 310
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.9278074866310164e-05,
      "loss": 0.1371,
      "step": 320
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.925133689839572e-05,
      "loss": 0.2074,
      "step": 330
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.922459893048129e-05,
      "loss": 0.3628,
      "step": 340
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.919786096256685e-05,
      "loss": 0.1888,
      "step": 350
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.917112299465241e-05,
      "loss": 0.0934,
      "step": 360
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.914438502673797e-05,
      "loss": 0.1593,
      "step": 370
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.911764705882353e-05,
      "loss": 0.0721,
      "step": 380
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.909090909090909e-05,
      "loss": 0.1139,
      "step": 390
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.906417112299466e-05,
      "loss": 0.1118,
      "step": 400
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.9037433155080216e-05,
      "loss": 0.218,
      "step": 410
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.901069518716578e-05,
      "loss": 0.126,
      "step": 420
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.898395721925134e-05,
      "loss": 0.1057,
      "step": 430
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.89572192513369e-05,
      "loss": 0.1825,
      "step": 440
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.8930481283422465e-05,
      "loss": 0.1379,
      "step": 450
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.8903743315508024e-05,
      "loss": 0.1311,
      "step": 460
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.8877005347593584e-05,
      "loss": 0.0722,
      "step": 470
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.885026737967915e-05,
      "loss": 0.1286,
      "step": 480
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.882352941176471e-05,
      "loss": 0.1534,
      "step": 490
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.879679144385027e-05,
      "loss": 0.1332,
      "step": 500
    },
    {
      "epoch": 0.08,
      "eval_accuracy": 0.8395015105740181,
      "eval_f1": 0.8361086180158485,
      "eval_loss": 0.9186971783638,
      "eval_precision": 0.8701532825604937,
      "eval_recall": 0.8395015105740181,
      "eval_runtime": 10.5139,
      "eval_samples_per_second": 251.857,
      "eval_steps_per_second": 3.995,
      "step": 500
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.8770053475935826e-05,
      "loss": 0.1317,
      "step": 510
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.874331550802139e-05,
      "loss": 0.0683,
      "step": 520
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.871657754010696e-05,
      "loss": 0.0474,
      "step": 530
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.868983957219252e-05,
      "loss": 0.2302,
      "step": 540
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.8663101604278076e-05,
      "loss": 0.1788,
      "step": 550
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.863636363636364e-05,
      "loss": 0.0917,
      "step": 560
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.86096256684492e-05,
      "loss": 0.2001,
      "step": 570
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.858288770053476e-05,
      "loss": 0.3409,
      "step": 580
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.855614973262032e-05,
      "loss": 0.2044,
      "step": 590
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.8529411764705885e-05,
      "loss": 0.1292,
      "step": 600
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.850267379679145e-05,
      "loss": 0.1422,
      "step": 610
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.8475935828877e-05,
      "loss": 0.1165,
      "step": 620
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.844919786096257e-05,
      "loss": 0.1115,
      "step": 630
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.8422459893048135e-05,
      "loss": 0.0957,
      "step": 640
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.8395721925133694e-05,
      "loss": 0.1791,
      "step": 650
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.836898395721925e-05,
      "loss": 0.0691,
      "step": 660
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.834224598930481e-05,
      "loss": 0.2829,
      "step": 670
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.831550802139038e-05,
      "loss": 0.2635,
      "step": 680
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.828877005347594e-05,
      "loss": 0.1722,
      "step": 690
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.8262032085561496e-05,
      "loss": 0.0974,
      "step": 700
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.823529411764706e-05,
      "loss": 0.0888,
      "step": 710
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.820855614973262e-05,
      "loss": 0.3148,
      "step": 720
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.8181818181818186e-05,
      "loss": 0.3556,
      "step": 730
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.8155080213903745e-05,
      "loss": 0.1788,
      "step": 740
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.8128342245989304e-05,
      "loss": 0.4376,
      "step": 750
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.810160427807487e-05,
      "loss": 0.2413,
      "step": 760
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.807486631016043e-05,
      "loss": 0.1802,
      "step": 770
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.804812834224599e-05,
      "loss": 0.1364,
      "step": 780
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.8021390374331554e-05,
      "loss": 0.0667,
      "step": 790
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.799465240641711e-05,
      "loss": 0.1409,
      "step": 800
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.796791443850268e-05,
      "loss": 0.0897,
      "step": 810
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.794117647058824e-05,
      "loss": 0.0871,
      "step": 820
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.79144385026738e-05,
      "loss": 0.0552,
      "step": 830
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.788770053475936e-05,
      "loss": 0.2569,
      "step": 840
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.786096256684492e-05,
      "loss": 0.2877,
      "step": 850
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.783422459893048e-05,
      "loss": 0.1365,
      "step": 860
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.7807486631016047e-05,
      "loss": 0.1604,
      "step": 870
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.7780748663101606e-05,
      "loss": 0.2179,
      "step": 880
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.7754010695187165e-05,
      "loss": 0.0969,
      "step": 890
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.772727272727273e-05,
      "loss": 0.0953,
      "step": 900
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.770053475935829e-05,
      "loss": 0.0803,
      "step": 910
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.7673796791443855e-05,
      "loss": 0.1573,
      "step": 920
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.7647058823529414e-05,
      "loss": 0.1388,
      "step": 930
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.7620320855614973e-05,
      "loss": 0.1559,
      "step": 940
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.759358288770054e-05,
      "loss": 0.2411,
      "step": 950
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.75668449197861e-05,
      "loss": 0.1119,
      "step": 960
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.754010695187166e-05,
      "loss": 0.1416,
      "step": 970
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.751336898395722e-05,
      "loss": 0.1073,
      "step": 980
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.748663101604278e-05,
      "loss": 0.1799,
      "step": 990
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.745989304812835e-05,
      "loss": 0.127,
      "step": 1000
    },
    {
      "epoch": 0.16,
      "eval_accuracy": 0.847809667673716,
      "eval_f1": 0.8460260531471735,
      "eval_loss": 1.017073392868042,
      "eval_precision": 0.8647086198841812,
      "eval_recall": 0.847809667673716,
      "eval_runtime": 10.5379,
      "eval_samples_per_second": 251.284,
      "eval_steps_per_second": 3.986,
      "step": 1000
    }
  ],
  "logging_steps": 10,
  "max_steps": 18750,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 8419629367296000.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}