{
  "best_metric": 0.6476547574288157,
  "best_model_checkpoint": "./nlu_finetuned_models/cola/roberta-base_lr1e-05/checkpoint-1924",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 4810,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_loss": 0.39855843782424927,
      "eval_matthews_correlation": 0.551445188783165,
      "eval_runtime": 0.4311,
      "eval_samples_per_second": 1985.797,
      "eval_steps_per_second": 125.272,
      "step": 481
    },
    {
      "epoch": 1.0395010395010396,
      "grad_norm": 23.592742919921875,
      "learning_rate": 9.533289095332891e-06,
      "loss": 0.537,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3867790997028351,
      "eval_matthews_correlation": 0.5922946478625156,
      "eval_runtime": 0.4252,
      "eval_samples_per_second": 2013.273,
      "eval_steps_per_second": 127.006,
      "step": 962
    },
    {
      "epoch": 2.079002079002079,
      "grad_norm": 24.047794342041016,
      "learning_rate": 8.427339084273391e-06,
      "loss": 0.3663,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.44944295287132263,
      "eval_matthews_correlation": 0.5697569536882547,
      "eval_runtime": 0.432,
      "eval_samples_per_second": 1981.347,
      "eval_steps_per_second": 124.992,
      "step": 1443
    },
    {
      "epoch": 3.1185031185031185,
      "grad_norm": 19.57173728942871,
      "learning_rate": 7.3213890732138915e-06,
      "loss": 0.2692,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.5625013709068298,
      "eval_matthews_correlation": 0.6476547574288157,
      "eval_runtime": 0.4183,
      "eval_samples_per_second": 2046.415,
      "eval_steps_per_second": 129.096,
      "step": 1924
    },
    {
      "epoch": 4.158004158004158,
      "grad_norm": 18.871004104614258,
      "learning_rate": 6.2154390621543915e-06,
      "loss": 0.2002,
      "step": 2000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.6681837439537048,
      "eval_matthews_correlation": 0.5953927103011889,
      "eval_runtime": 0.4264,
      "eval_samples_per_second": 2007.326,
      "eval_steps_per_second": 126.63,
      "step": 2405
    },
    {
      "epoch": 5.197505197505198,
      "grad_norm": 12.891629219055176,
      "learning_rate": 5.1094890510948916e-06,
      "loss": 0.1761,
      "step": 2500
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.7049034237861633,
      "eval_matthews_correlation": 0.6376896027079079,
      "eval_runtime": 0.4161,
      "eval_samples_per_second": 2057.2,
      "eval_steps_per_second": 129.777,
      "step": 2886
    },
    {
      "epoch": 6.237006237006237,
      "grad_norm": 0.21968619525432587,
      "learning_rate": 4.003539040035391e-06,
      "loss": 0.1486,
      "step": 3000
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.8099717497825623,
      "eval_matthews_correlation": 0.6372565507596143,
      "eval_runtime": 0.4311,
      "eval_samples_per_second": 1985.553,
      "eval_steps_per_second": 125.257,
      "step": 3367
    },
    {
      "epoch": 7.276507276507276,
      "grad_norm": 0.38824957609176636,
      "learning_rate": 2.897589028975891e-06,
      "loss": 0.1261,
      "step": 3500
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.8209366202354431,
      "eval_matthews_correlation": 0.6409103987573772,
      "eval_runtime": 0.4313,
      "eval_samples_per_second": 1984.868,
      "eval_steps_per_second": 125.214,
      "step": 3848
    },
    {
      "epoch": 8.316008316008316,
      "grad_norm": 0.6946465969085693,
      "learning_rate": 1.7916390179163902e-06,
      "loss": 0.0963,
      "step": 4000
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.866554319858551,
      "eval_matthews_correlation": 0.6465132548864322,
      "eval_runtime": 0.4204,
      "eval_samples_per_second": 2036.18,
      "eval_steps_per_second": 128.451,
      "step": 4329
    },
    {
      "epoch": 9.355509355509355,
      "grad_norm": 0.8278540968894958,
      "learning_rate": 6.856890068568902e-07,
      "loss": 0.0918,
      "step": 4500
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.9010610580444336,
      "eval_matthews_correlation": 0.6344646755477841,
      "eval_runtime": 0.4206,
      "eval_samples_per_second": 2035.283,
      "eval_steps_per_second": 128.394,
      "step": 4810
    },
    {
      "epoch": 10.0,
      "step": 4810,
      "total_flos": 806520176327100.0,
      "train_loss": 0.2141543533103134,
      "train_runtime": 285.3143,
      "train_samples_per_second": 269.703,
      "train_steps_per_second": 16.859
    }
  ],
  "logging_steps": 500,
  "max_steps": 4810,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 806520176327100.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}