{
  "best_global_step": 200,
  "best_metric": 8.553091902285814e-05,
  "best_model_checkpoint": "./UltiVul_focal_loss_checkpoints/checkpoint-200",
  "epoch": 0.05630630630630631,
  "eval_steps": 100,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002815315315315315,
      "grad_norm": 49.37904357910156,
      "learning_rate": 4.5e-06,
      "loss": 23.6642,
      "step": 10
    },
    {
      "epoch": 0.00563063063063063,
      "grad_norm": 32.69184112548828,
      "learning_rate": 9.5e-06,
      "loss": 18.4995,
      "step": 20
    },
    {
      "epoch": 0.008445945945945946,
      "grad_norm": 17.92224884033203,
      "learning_rate": 1.45e-05,
      "loss": 10.2669,
      "step": 30
    },
    {
      "epoch": 0.01126126126126126,
      "grad_norm": 12.185972213745117,
      "learning_rate": 1.9500000000000003e-05,
      "loss": 4.8376,
      "step": 40
    },
    {
      "epoch": 0.014076576576576577,
      "grad_norm": 2.712137460708618,
      "learning_rate": 2.45e-05,
      "loss": 1.0611,
      "step": 50
    },
    {
      "epoch": 0.016891891891891893,
      "grad_norm": 0.8478547930717468,
      "learning_rate": 2.95e-05,
      "loss": 0.0324,
      "step": 60
    },
    {
      "epoch": 0.019707207207207207,
      "grad_norm": 1.0729281902313232,
      "learning_rate": 3.45e-05,
      "loss": 0.0033,
      "step": 70
    },
    {
      "epoch": 0.02252252252252252,
      "grad_norm": 0.003953734878450632,
      "learning_rate": 3.9500000000000005e-05,
      "loss": 0.001,
      "step": 80
    },
    {
      "epoch": 0.02533783783783784,
      "grad_norm": 0.25488701462745667,
      "learning_rate": 4.4500000000000004e-05,
      "loss": 0.0011,
      "step": 90
    },
    {
      "epoch": 0.028153153153153154,
      "grad_norm": 0.0010484829545021057,
      "learning_rate": 4.9500000000000004e-05,
      "loss": 0.001,
      "step": 100
    },
    {
      "epoch": 0.028153153153153154,
      "eval_loss": 0.00028092681895941496,
      "eval_runtime": 1519.0849,
      "eval_samples_per_second": 2.291,
      "eval_steps_per_second": 1.145,
      "step": 100
    },
    {
      "epoch": 0.030968468468468468,
      "grad_norm": 0.0045521012507379055,
      "learning_rate": 4.9937880765328895e-05,
      "loss": 0.0006,
      "step": 110
    },
    {
      "epoch": 0.033783783783783786,
      "grad_norm": 0.008847632445394993,
      "learning_rate": 4.972354390769318e-05,
      "loss": 0.0006,
      "step": 120
    },
    {
      "epoch": 0.0365990990990991,
      "grad_norm": 0.013454323634505272,
      "learning_rate": 4.9357537087851815e-05,
      "loss": 0.0004,
      "step": 130
    },
    {
      "epoch": 0.039414414414414414,
      "grad_norm": 0.06617984175682068,
      "learning_rate": 4.884210562470972e-05,
      "loss": 0.0007,
      "step": 140
    },
    {
      "epoch": 0.04222972972972973,
      "grad_norm": 0.0029065811540931463,
      "learning_rate": 4.818041150308695e-05,
      "loss": 0.0002,
      "step": 150
    },
    {
      "epoch": 0.04504504504504504,
      "grad_norm": 0.0019135670736432076,
      "learning_rate": 4.737651397609031e-05,
      "loss": 0.0,
      "step": 160
    },
    {
      "epoch": 0.04786036036036036,
      "grad_norm": 0.018747340887784958,
      "learning_rate": 4.643534466306984e-05,
      "loss": 0.0004,
      "step": 170
    },
    {
      "epoch": 0.05067567567567568,
      "grad_norm": 0.0025554385501891375,
      "learning_rate": 4.536267729592528e-05,
      "loss": 0.0001,
      "step": 180
    },
    {
      "epoch": 0.05349099099099099,
      "grad_norm": 0.00040266645373776555,
      "learning_rate": 4.416509229935797e-05,
      "loss": 0.0002,
      "step": 190
    },
    {
      "epoch": 0.05630630630630631,
      "grad_norm": 0.22791388630867004,
      "learning_rate": 4.284993642235523e-05,
      "loss": 0.0001,
      "step": 200
    },
    {
      "epoch": 0.05630630630630631,
      "eval_loss": 8.553091902285814e-05,
      "eval_runtime": 1509.8715,
      "eval_samples_per_second": 2.305,
      "eval_steps_per_second": 1.152,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 501,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1228802957656064e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}