{
  "best_global_step": 500,
  "best_metric": 1.4448833098867908e-05,
  "best_model_checkpoint": "./UltiVul_focal_loss_checkpoints/checkpoint-500",
  "epoch": 0.14104729729729729,
  "eval_steps": 100,
  "global_step": 501,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002815315315315315,
      "grad_norm": 49.37904357910156,
      "learning_rate": 4.5e-06,
      "loss": 23.6642,
      "step": 10
    },
    {
      "epoch": 0.00563063063063063,
      "grad_norm": 32.69184112548828,
      "learning_rate": 9.5e-06,
      "loss": 18.4995,
      "step": 20
    },
    {
      "epoch": 0.008445945945945946,
      "grad_norm": 17.92224884033203,
      "learning_rate": 1.45e-05,
      "loss": 10.2669,
      "step": 30
    },
    {
      "epoch": 0.01126126126126126,
      "grad_norm": 12.185972213745117,
      "learning_rate": 1.9500000000000003e-05,
      "loss": 4.8376,
      "step": 40
    },
    {
      "epoch": 0.014076576576576577,
      "grad_norm": 2.712137460708618,
      "learning_rate": 2.45e-05,
      "loss": 1.0611,
      "step": 50
    },
    {
      "epoch": 0.016891891891891893,
      "grad_norm": 0.8478547930717468,
      "learning_rate": 2.95e-05,
      "loss": 0.0324,
      "step": 60
    },
    {
      "epoch": 0.019707207207207207,
      "grad_norm": 1.0729281902313232,
      "learning_rate": 3.45e-05,
      "loss": 0.0033,
      "step": 70
    },
    {
      "epoch": 0.02252252252252252,
      "grad_norm": 0.003953734878450632,
      "learning_rate": 3.9500000000000005e-05,
      "loss": 0.001,
      "step": 80
    },
    {
      "epoch": 0.02533783783783784,
      "grad_norm": 0.25488701462745667,
      "learning_rate": 4.4500000000000004e-05,
      "loss": 0.0011,
      "step": 90
    },
    {
      "epoch": 0.028153153153153154,
      "grad_norm": 0.0010484829545021057,
      "learning_rate": 4.9500000000000004e-05,
      "loss": 0.001,
      "step": 100
    },
    {
      "epoch": 0.028153153153153154,
      "eval_loss": 0.00028092681895941496,
      "eval_runtime": 1519.0849,
      "eval_samples_per_second": 2.291,
      "eval_steps_per_second": 1.145,
      "step": 100
    },
    {
      "epoch": 0.030968468468468468,
      "grad_norm": 0.0045521012507379055,
      "learning_rate": 4.9937880765328895e-05,
      "loss": 0.0006,
      "step": 110
    },
    {
      "epoch": 0.033783783783783786,
      "grad_norm": 0.008847632445394993,
      "learning_rate": 4.972354390769318e-05,
      "loss": 0.0006,
      "step": 120
    },
    {
      "epoch": 0.0365990990990991,
      "grad_norm": 0.013454323634505272,
      "learning_rate": 4.9357537087851815e-05,
      "loss": 0.0004,
      "step": 130
    },
    {
      "epoch": 0.039414414414414414,
      "grad_norm": 0.06617984175682068,
      "learning_rate": 4.884210562470972e-05,
      "loss": 0.0007,
      "step": 140
    },
    {
      "epoch": 0.04222972972972973,
      "grad_norm": 0.0029065811540931463,
      "learning_rate": 4.818041150308695e-05,
      "loss": 0.0002,
      "step": 150
    },
    {
      "epoch": 0.04504504504504504,
      "grad_norm": 0.0019135670736432076,
      "learning_rate": 4.737651397609031e-05,
      "loss": 0.0,
      "step": 160
    },
    {
      "epoch": 0.04786036036036036,
      "grad_norm": 0.018747340887784958,
      "learning_rate": 4.643534466306984e-05,
      "loss": 0.0004,
      "step": 170
    },
    {
      "epoch": 0.05067567567567568,
      "grad_norm": 0.0025554385501891375,
      "learning_rate": 4.536267729592528e-05,
      "loss": 0.0001,
      "step": 180
    },
    {
      "epoch": 0.05349099099099099,
      "grad_norm": 0.00040266645373776555,
      "learning_rate": 4.416509229935797e-05,
      "loss": 0.0002,
      "step": 190
    },
    {
      "epoch": 0.05630630630630631,
      "grad_norm": 0.22791388630867004,
      "learning_rate": 4.284993642235523e-05,
      "loss": 0.0001,
      "step": 200
    },
    {
      "epoch": 0.05630630630630631,
      "eval_loss": 8.553091902285814e-05,
      "eval_runtime": 1509.8715,
      "eval_samples_per_second": 2.305,
      "eval_steps_per_second": 1.152,
      "step": 200
    },
    {
      "epoch": 0.05912162162162162,
      "grad_norm": 0.0003946940996684134,
      "learning_rate": 4.142527766855376e-05,
      "loss": 0.0,
      "step": 210
    },
    {
      "epoch": 0.061936936936936936,
      "grad_norm": 0.0003258013166487217,
      "learning_rate": 3.9899855801967675e-05,
      "loss": 0.0,
      "step": 220
    },
    {
      "epoch": 0.06475225225225226,
      "grad_norm": 0.00010741609003162012,
      "learning_rate": 3.828302873171048e-05,
      "loss": 0.0,
      "step": 230
    },
    {
      "epoch": 0.06756756756756757,
      "grad_norm": 0.00039509148336946964,
      "learning_rate": 3.6584715104621596e-05,
      "loss": 0.0,
      "step": 240
    },
    {
      "epoch": 0.07038288288288289,
      "grad_norm": 0.010233906097710133,
      "learning_rate": 3.481533345797042e-05,
      "loss": 0.0015,
      "step": 250
    },
    {
      "epoch": 0.0731981981981982,
      "grad_norm": 0.004945151507854462,
      "learning_rate": 3.298573830551425e-05,
      "loss": 0.0003,
      "step": 260
    },
    {
      "epoch": 0.07601351351351351,
      "grad_norm": 0.00017072241462301463,
      "learning_rate": 3.110715354899873e-05,
      "loss": 0.0,
      "step": 270
    },
    {
      "epoch": 0.07882882882882883,
      "grad_norm": 6.019602369633503e-05,
      "learning_rate": 2.919110362359727e-05,
      "loss": 0.0,
      "step": 280
    },
    {
      "epoch": 0.08164414414414414,
      "grad_norm": 0.0021696966141462326,
      "learning_rate": 2.724934279968692e-05,
      "loss": 0.0,
      "step": 290
    },
    {
      "epoch": 0.08445945945945946,
      "grad_norm": 4.268585325917229e-05,
      "learning_rate": 2.5293783074668785e-05,
      "loss": 0.0,
      "step": 300
    },
    {
      "epoch": 0.08445945945945946,
      "eval_loss": 5.6611788750160486e-05,
      "eval_runtime": 1478.3232,
      "eval_samples_per_second": 2.354,
      "eval_steps_per_second": 1.177,
      "step": 300
    },
    {
      "epoch": 0.08727477477477477,
      "grad_norm": 7.843998173484579e-05,
      "learning_rate": 2.333642109719049e-05,
      "loss": 0.0,
      "step": 310
    },
    {
      "epoch": 0.09009009009009009,
      "grad_norm": 0.0001974907354451716,
      "learning_rate": 2.13892645720639e-05,
      "loss": 0.0,
      "step": 320
    },
    {
      "epoch": 0.0929054054054054,
      "grad_norm": 0.08875004947185516,
      "learning_rate": 1.9464258597357353e-05,
      "loss": 0.0005,
      "step": 330
    },
    {
      "epoch": 0.09572072072072071,
      "grad_norm": 0.024796342477202415,
      "learning_rate": 1.7573212385557525e-05,
      "loss": 0.0,
      "step": 340
    },
    {
      "epoch": 0.09853603603603604,
      "grad_norm": 7.793847908033058e-05,
      "learning_rate": 1.5727726818339933e-05,
      "loss": 0.0002,
      "step": 350
    },
    {
      "epoch": 0.10135135135135136,
      "grad_norm": 0.00016479768964927644,
      "learning_rate": 1.393912327937337e-05,
      "loss": 0.0001,
      "step": 360
    },
    {
      "epoch": 0.10416666666666667,
      "grad_norm": 0.38837477564811707,
      "learning_rate": 1.2218374201743018e-05,
      "loss": 0.0001,
      "step": 370
    },
    {
      "epoch": 0.10698198198198199,
      "grad_norm": 0.0007006971281953156,
      "learning_rate": 1.0576035756058728e-05,
      "loss": 0.0,
      "step": 380
    },
    {
      "epoch": 0.1097972972972973,
      "grad_norm": 0.000660314632114023,
      "learning_rate": 9.022183092182335e-06,
      "loss": 0.0,
      "step": 390
    },
    {
      "epoch": 0.11261261261261261,
      "grad_norm": 2.8984422897337936e-05,
      "learning_rate": 7.566348531842601e-06,
      "loss": 0.0,
      "step": 400
    },
    {
      "epoch": 0.11261261261261261,
      "eval_loss": 1.468156187911518e-05,
      "eval_runtime": 1487.0768,
      "eval_samples_per_second": 2.34,
      "eval_steps_per_second": 1.17,
      "step": 400
    },
    {
      "epoch": 0.11542792792792793,
      "grad_norm": 3.786681918427348e-05,
      "learning_rate": 6.217463091303572e-06,
      "loss": 0.0003,
      "step": 410
    },
    {
      "epoch": 0.11824324324324324,
      "grad_norm": 0.00014283777272794396,
      "learning_rate": 4.9838016928235645e-06,
      "loss": 0.0,
      "step": 420
    },
    {
      "epoch": 0.12105855855855856,
      "grad_norm": 0.00020300326286815107,
      "learning_rate": 3.872932401012597e-06,
      "loss": 0.0,
      "step": 430
    },
    {
      "epoch": 0.12387387387387387,
      "grad_norm": 3.677326822071336e-05,
      "learning_rate": 2.8916699955047477e-06,
      "loss": 0.0,
      "step": 440
    },
    {
      "epoch": 0.1266891891891892,
      "grad_norm": 9.273546311305836e-05,
      "learning_rate": 2.0460341647602466e-06,
      "loss": 0.0,
      "step": 450
    },
    {
      "epoch": 0.12950450450450451,
      "grad_norm": 5.129961937200278e-05,
      "learning_rate": 1.3412125774631495e-06,
      "loss": 0.0,
      "step": 460
    },
    {
      "epoch": 0.13231981981981983,
      "grad_norm": 0.00041971213067881763,
      "learning_rate": 7.815290580581336e-07,
      "loss": 0.0,
      "step": 470
    },
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 0.014930936507880688,
      "learning_rate": 3.704170616579544e-07,
      "loss": 0.0,
      "step": 480
    },
    {
      "epoch": 0.13795045045045046,
      "grad_norm": 0.008020805194973946,
      "learning_rate": 1.1039861104336357e-07,
      "loss": 0.0,
      "step": 490
    },
    {
      "epoch": 0.14076576576576577,
      "grad_norm": 0.0007628165767528117,
      "learning_rate": 3.0688249693794356e-09,
      "loss": 0.0,
      "step": 500
    },
    {
      "epoch": 0.14076576576576577,
      "eval_loss": 1.4448833098867908e-05,
      "eval_runtime": 1495.0561,
      "eval_samples_per_second": 2.328,
      "eval_steps_per_second": 1.164,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 501,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.7663569190980813e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}