{
  "best_metric": 0.9186971783638,
  "best_model_checkpoint": "./Finetuned_Classification_model_200k/checkpoint-500",
  "epoch": 0.08,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1e-05,
      "loss": 1.037,
      "step": 10
    },
    {
      "epoch": 0.0,
      "learning_rate": 2e-05,
      "loss": 0.907,
      "step": 20
    },
    {
      "epoch": 0.0,
      "learning_rate": 3e-05,
      "loss": 0.7159,
      "step": 30
    },
    {
      "epoch": 0.01,
      "learning_rate": 4e-05,
      "loss": 0.5758,
      "step": 40
    },
    {
      "epoch": 0.01,
      "learning_rate": 5e-05,
      "loss": 0.3454,
      "step": 50
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.997326203208557e-05,
      "loss": 0.1851,
      "step": 60
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.994652406417113e-05,
      "loss": 0.2821,
      "step": 70
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.9919786096256686e-05,
      "loss": 0.1741,
      "step": 80
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.9893048128342245e-05,
      "loss": 0.1749,
      "step": 90
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.986631016042781e-05,
      "loss": 0.2407,
      "step": 100
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.983957219251337e-05,
      "loss": 0.2612,
      "step": 110
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.981283422459893e-05,
      "loss": 0.1677,
      "step": 120
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.9786096256684495e-05,
      "loss": 0.1123,
      "step": 130
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.975935828877006e-05,
      "loss": 0.1368,
      "step": 140
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.973262032085561e-05,
      "loss": 0.1449,
      "step": 150
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.970588235294118e-05,
      "loss": 0.152,
      "step": 160
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.967914438502674e-05,
      "loss": 0.2504,
      "step": 170
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.9652406417112304e-05,
      "loss": 0.2578,
      "step": 180
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.962566844919786e-05,
      "loss": 0.1289,
      "step": 190
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.959893048128342e-05,
      "loss": 0.1263,
      "step": 200
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.957219251336899e-05,
      "loss": 0.1031,
      "step": 210
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9545454545454553e-05,
      "loss": 0.2453,
      "step": 220
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9518716577540106e-05,
      "loss": 0.0675,
      "step": 230
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.949197860962567e-05,
      "loss": 0.1807,
      "step": 240
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.946524064171123e-05,
      "loss": 0.1493,
      "step": 250
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9438502673796796e-05,
      "loss": 0.109,
      "step": 260
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9411764705882355e-05,
      "loss": 0.2148,
      "step": 270
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9385026737967914e-05,
      "loss": 0.2574,
      "step": 280
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.935828877005348e-05,
      "loss": 0.2198,
      "step": 290
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.933155080213904e-05,
      "loss": 0.1574,
      "step": 300
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.93048128342246e-05,
      "loss": 0.0866,
      "step": 310
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.9278074866310164e-05,
      "loss": 0.1371,
      "step": 320
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.925133689839572e-05,
      "loss": 0.2074,
      "step": 330
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.922459893048129e-05,
      "loss": 0.3628,
      "step": 340
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.919786096256685e-05,
      "loss": 0.1888,
      "step": 350
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.917112299465241e-05,
      "loss": 0.0934,
      "step": 360
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.914438502673797e-05,
      "loss": 0.1593,
      "step": 370
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.911764705882353e-05,
      "loss": 0.0721,
      "step": 380
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.909090909090909e-05,
      "loss": 0.1139,
      "step": 390
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.906417112299466e-05,
      "loss": 0.1118,
      "step": 400
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.9037433155080216e-05,
      "loss": 0.218,
      "step": 410
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.901069518716578e-05,
      "loss": 0.126,
      "step": 420
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.898395721925134e-05,
      "loss": 0.1057,
      "step": 430
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.89572192513369e-05,
      "loss": 0.1825,
      "step": 440
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.8930481283422465e-05,
      "loss": 0.1379,
      "step": 450
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.8903743315508024e-05,
      "loss": 0.1311,
      "step": 460
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.8877005347593584e-05,
      "loss": 0.0722,
      "step": 470
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.885026737967915e-05,
      "loss": 0.1286,
      "step": 480
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.882352941176471e-05,
      "loss": 0.1534,
      "step": 490
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.879679144385027e-05,
      "loss": 0.1332,
      "step": 500
    },
    {
      "epoch": 0.08,
      "eval_accuracy": 0.8395015105740181,
      "eval_f1": 0.8361086180158485,
      "eval_loss": 0.9186971783638,
      "eval_precision": 0.8701532825604937,
      "eval_recall": 0.8395015105740181,
      "eval_runtime": 10.5139,
      "eval_samples_per_second": 251.857,
      "eval_steps_per_second": 3.995,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 18750,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 4209814683648000.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}