| """ Fine-pruning Masked BERT on sequence classification on GLUE.""" |
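# Example invocation (illustrative sketch only: the script file name, data/output paths and
# hyper-parameter values below are placeholders rather than recommended settings; every flag
# shown is defined in main() further down):
#
#   python masked_run_glue.py \
#       --model_type masked_bert \
#       --model_name_or_path bert-base-uncased \
#       --task_name MRPC \
#       --do_train --do_eval --do_lower_case \
#       --data_dir /path/to/glue_data/MRPC \
#       --output_dir /path/to/output \
#       --per_gpu_train_batch_size 32 \
#       --learning_rate 3e-5 \
#       --mask_scores_learning_rate 1e-2 \
#       --initial_threshold 1.0 --final_threshold 0.15 \
#       --initial_warmup 1 --final_warmup 2 \
#       --pruning_method topK --mask_init constant --mask_scale 0.0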
|
|
| import argparse |
| import glob |
| import json |
| import logging |
| import os |
| import random |
|
|
| import numpy as np |
| import torch |
| from emmental import MaskedBertConfig, MaskedBertForSequenceClassification |
| from torch import nn |
| from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset |
| from torch.utils.data.distributed import DistributedSampler |
| from tqdm import tqdm, trange |
|
|
| from transformers import ( |
| WEIGHTS_NAME, |
| AdamW, |
| BertConfig, |
| BertForSequenceClassification, |
| BertTokenizer, |
| get_linear_schedule_with_warmup, |
| ) |
| from transformers import glue_compute_metrics as compute_metrics |
| from transformers import glue_convert_examples_to_features as convert_examples_to_features |
| from transformers import glue_output_modes as output_modes |
| from transformers import glue_processors as processors |
|
|
|
|
| try: |
| from torch.utils.tensorboard import SummaryWriter |
| except ImportError: |
| from tensorboardX import SummaryWriter |
|
|
|
|
| logger = logging.getLogger(__name__) |
|
|
| MODEL_CLASSES = { |
| "bert": (BertConfig, BertForSequenceClassification, BertTokenizer), |
| "masked_bert": (MaskedBertConfig, MaskedBertForSequenceClassification, BertTokenizer), |
| } |
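# The MaskedBert* classes come from the `emmental` package distributed alongside this example;
# they mirror the regular BERT classes but add learnable mask scores to the linear layers so
# that weights can be pruned at forward time given a threshold.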
|
|
|
|
| def set_seed(args): |
| random.seed(args.seed) |
| np.random.seed(args.seed) |
| torch.manual_seed(args.seed) |
| if args.n_gpu > 0: |
| torch.cuda.manual_seed_all(args.seed) |
|
|
|
|
| def schedule_threshold( |
| step: int, |
| total_step: int, |
| warmup_steps: int, |
| initial_threshold: float, |
| final_threshold: float, |
| initial_warmup: int, |
| final_warmup: int, |
| final_lambda: float, |
| ): |
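    """Sparsity (and regularization) schedule used during fine-pruning.

    The threshold passed to the masked layers is held at `initial_threshold` for the first
    `initial_warmup * warmup_steps` steps and at `final_threshold` for the last
    `final_warmup * warmup_steps` steps; in between it follows a cubic interpolation between
    the two values. The regularization strength is scaled with the current threshold so that
    it equals `final_lambda` once the threshold reaches `final_threshold`.
    """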
| if step <= initial_warmup * warmup_steps: |
| threshold = initial_threshold |
| elif step > (total_step - final_warmup * warmup_steps): |
| threshold = final_threshold |
| else: |
| spars_warmup_steps = initial_warmup * warmup_steps |
| spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps |
| mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps) |
| threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff**3) |
| regu_lambda = final_lambda * threshold / final_threshold |
| return threshold, regu_lambda |
|
|
|
|
| def regularization(model: nn.Module, mode: str): |
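    """Mean penalty over all `mask_scores` tensors: the normalized L1 norm of sigmoid(scores)
    for mode "l1", or the expected proportion of non-zero gates of a hard-concrete
    distribution for mode "l0"."""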
| regu, counter = 0, 0 |
| for name, param in model.named_parameters(): |
| if "mask_scores" in name: |
| if mode == "l1": |
| regu += torch.norm(torch.sigmoid(param), p=1) / param.numel() |
| elif mode == "l0": |
| regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel() |
| else: |
                raise ValueError("Don't know this mode.")
| counter += 1 |
| return regu / counter |
|
|
|
|
| def train(args, train_dataset, model, tokenizer, teacher=None): |
| """Train the model""" |
| if args.local_rank in [-1, 0]: |
| tb_writer = SummaryWriter(log_dir=args.output_dir) |
|
|
| args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) |
| train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) |
| train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) |
|
|
| if args.max_steps > 0: |
| t_total = args.max_steps |
| args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 |
| else: |
| t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs |
|
|
| |
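    # Prepare optimizer and schedule (linear warmup and decay); the mask scores get their own learning rate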
| no_decay = ["bias", "LayerNorm.weight"] |
| optimizer_grouped_parameters = [ |
| { |
| "params": [p for n, p in model.named_parameters() if "mask_score" in n and p.requires_grad], |
| "lr": args.mask_scores_learning_rate, |
| }, |
| { |
| "params": [ |
| p |
| for n, p in model.named_parameters() |
| if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay) |
| ], |
| "lr": args.learning_rate, |
| "weight_decay": args.weight_decay, |
| }, |
| { |
| "params": [ |
| p |
| for n, p in model.named_parameters() |
| if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay) |
| ], |
| "lr": args.learning_rate, |
| "weight_decay": 0.0, |
| }, |
| ] |
|
|
| optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) |
| scheduler = get_linear_schedule_with_warmup( |
| optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total |
| ) |
|
|
| |
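    # Check if saved optimizer or scheduler states exist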
| if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( |
| os.path.join(args.model_name_or_path, "scheduler.pt") |
| ): |
| |
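        # Load the saved optimizer and scheduler states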
| optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) |
| scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) |
|
|
| if args.fp16: |
| try: |
| from apex import amp |
| except ImportError: |
| raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") |
| model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) |
|
|
| |
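    # Multi-GPU training (should be after apex fp16 initialization)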
| if args.n_gpu > 1: |
| model = nn.DataParallel(model) |
|
|
| |
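    # Distributed training (should be after apex fp16 initialization)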
| if args.local_rank != -1: |
| model = nn.parallel.DistributedDataParallel( |
| model, |
| device_ids=[args.local_rank], |
| output_device=args.local_rank, |
| find_unused_parameters=True, |
| ) |
|
|
| |
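    # Train!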
| logger.info("***** Running training *****") |
| logger.info(" Num examples = %d", len(train_dataset)) |
| logger.info(" Num Epochs = %d", args.num_train_epochs) |
| logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) |
| logger.info( |
| " Total train batch size (w. parallel, distributed & accumulation) = %d", |
| args.train_batch_size |
| * args.gradient_accumulation_steps |
| * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), |
| ) |
| logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) |
| logger.info(" Total optimization steps = %d", t_total) |
| |
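    # Distillation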
| if teacher is not None: |
| logger.info(" Training with distillation") |
|
|
| global_step = 0 |
| |
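    # Global TopK: cache for the globally computed score threshold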
| if args.global_topk: |
| threshold_mem = None |
| epochs_trained = 0 |
| steps_trained_in_current_epoch = 0 |
| |
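    # Check if we are continuing training from a checkpoint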
| if os.path.exists(args.model_name_or_path): |
| |
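        # Set global_step to the global step of the last saved checkpoint from the model path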
| try: |
| global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) |
| except ValueError: |
| global_step = 0 |
| epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) |
| steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) |
|
|
| logger.info(" Continuing training from checkpoint, will skip to saved global_step") |
| logger.info(" Continuing training from epoch %d", epochs_trained) |
| logger.info(" Continuing training from global step %d", global_step) |
| logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) |
|
|
| tr_loss, logging_loss = 0.0, 0.0 |
| model.zero_grad() |
| train_iterator = trange( |
| epochs_trained, |
| int(args.num_train_epochs), |
| desc="Epoch", |
| disable=args.local_rank not in [-1, 0], |
| ) |
| set_seed(args) |
| for _ in train_iterator: |
| epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) |
| for step, batch in enumerate(epoch_iterator): |
| |
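            # Skip past any already trained steps if resuming training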
| if steps_trained_in_current_epoch > 0: |
| steps_trained_in_current_epoch -= 1 |
| continue |
|
|
| model.train() |
| batch = tuple(t.to(args.device) for t in batch) |
| threshold, regu_lambda = schedule_threshold( |
| step=global_step, |
| total_step=t_total, |
| warmup_steps=args.warmup_steps, |
| final_threshold=args.final_threshold, |
| initial_threshold=args.initial_threshold, |
| final_warmup=args.final_warmup, |
| initial_warmup=args.initial_warmup, |
| final_lambda=args.final_lambda, |
| ) |
| |
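            # Global TopK: turn the scheduled proportion into an actual score threshold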
| if args.global_topk: |
| if threshold == 1.0: |
| threshold = -1e2 |
| else: |
| if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0): |
| |
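                        # Periodically recompute the global score threshold from all mask_scores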
| concat = torch.cat( |
| [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] |
| ) |
| n = concat.numel() |
| kth = max(n - (int(n * threshold) + 1), 1) |
| threshold_mem = concat.kthvalue(kth).values.item() |
| threshold = threshold_mem |
| else: |
| threshold = threshold_mem |
| inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} |
| if args.model_type != "distilbert": |
| inputs["token_type_ids"] = ( |
| batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None |
| ) |
|
|
| if "masked" in args.model_type: |
| inputs["threshold"] = threshold |
|
|
| outputs = model(**inputs) |
| loss, logits_stu = outputs |
|
|
| |
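            # Distillation loss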
| if teacher is not None: |
| if "token_type_ids" not in inputs: |
| inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2] |
| with torch.no_grad(): |
| (logits_tea,) = teacher( |
| input_ids=inputs["input_ids"], |
| token_type_ids=inputs["token_type_ids"], |
| attention_mask=inputs["attention_mask"], |
| ) |
|
|
| loss_logits = nn.functional.kl_div( |
| input=nn.functional.log_softmax(logits_stu / args.temperature, dim=-1), |
| target=nn.functional.softmax(logits_tea / args.temperature, dim=-1), |
| reduction="batchmean", |
| ) * (args.temperature**2) |
|
|
| loss = args.alpha_distil * loss_logits + args.alpha_ce * loss |
|
|
| |
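            # Regularization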
| if args.regularization is not None: |
| regu_ = regularization(model=model, mode=args.regularization) |
| loss = loss + regu_lambda * regu_ |
|
|
| if args.n_gpu > 1: |
| loss = loss.mean() |
| if args.gradient_accumulation_steps > 1: |
| loss = loss / args.gradient_accumulation_steps |
|
|
| if args.fp16: |
| with amp.scale_loss(loss, optimizer) as scaled_loss: |
| scaled_loss.backward() |
| else: |
| loss.backward() |
|
|
| tr_loss += loss.item() |
| if (step + 1) % args.gradient_accumulation_steps == 0 or ( |
| |
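                # Last step in epoch but step is always smaller than gradient_accumulation_steps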
| len(epoch_iterator) <= args.gradient_accumulation_steps |
| and (step + 1) == len(epoch_iterator) |
| ): |
| if args.fp16: |
| nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) |
| else: |
| nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) |
|
|
| if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: |
| tb_writer.add_scalar("threshold", threshold, global_step) |
| for name, param in model.named_parameters(): |
| if not param.requires_grad: |
| continue |
| tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step) |
| tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step) |
| tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step) |
| tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step) |
| tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step) |
| tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step) |
| if args.regularization is not None and "mask_scores" in name: |
| if args.regularization == "l1": |
| perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel() |
| elif args.regularization == "l0": |
| perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel() |
| tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step) |
|
|
| optimizer.step() |
| scheduler.step() |
| model.zero_grad() |
| global_step += 1 |
|
|
| if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: |
| logs = {} |
| if ( |
| args.local_rank == -1 and args.evaluate_during_training |
| ): |
| results = evaluate(args, model, tokenizer) |
| for key, value in results.items(): |
| eval_key = "eval_{}".format(key) |
| logs[eval_key] = value |
|
|
| loss_scalar = (tr_loss - logging_loss) / args.logging_steps |
| learning_rate_scalar = scheduler.get_lr() |
| logs["learning_rate"] = learning_rate_scalar[0] |
| if len(learning_rate_scalar) > 1: |
| for idx, lr in enumerate(learning_rate_scalar[1:]): |
| logs[f"learning_rate/{idx+1}"] = lr |
| logs["loss"] = loss_scalar |
| if teacher is not None: |
| logs["loss/distil"] = loss_logits.item() |
| if args.regularization is not None: |
| logs["loss/regularization"] = regu_.item() |
| if (teacher is not None) or (args.regularization is not None): |
| if (teacher is not None) and (args.regularization is not None): |
| logs["loss/instant_ce"] = ( |
| loss.item() |
| - regu_lambda * logs["loss/regularization"] |
| - args.alpha_distil * logs["loss/distil"] |
| ) / args.alpha_ce |
| elif teacher is not None: |
| logs["loss/instant_ce"] = ( |
| loss.item() - args.alpha_distil * logs["loss/distil"] |
| ) / args.alpha_ce |
| else: |
| logs["loss/instant_ce"] = loss.item() - regu_lambda * logs["loss/regularization"] |
| logging_loss = tr_loss |
|
|
| for key, value in logs.items(): |
| tb_writer.add_scalar(key, value, global_step) |
| print(json.dumps({**logs, **{"step": global_step}})) |
|
|
| if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: |
| |
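                    # Save model checkpoint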
| output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) |
| if not os.path.exists(output_dir): |
| os.makedirs(output_dir) |
| model_to_save = ( |
| model.module if hasattr(model, "module") else model |
| ) |
| model_to_save.save_pretrained(output_dir) |
| tokenizer.save_pretrained(output_dir) |
|
|
| torch.save(args, os.path.join(output_dir, "training_args.bin")) |
| logger.info("Saving model checkpoint to %s", output_dir) |
|
|
| torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) |
| torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) |
| logger.info("Saving optimizer and scheduler states to %s", output_dir) |
|
|
| if args.max_steps > 0 and global_step > args.max_steps: |
| epoch_iterator.close() |
| break |
| if args.max_steps > 0 and global_step > args.max_steps: |
| train_iterator.close() |
| break |
|
|
| if args.local_rank in [-1, 0]: |
| tb_writer.close() |
|
|
| return global_step, tr_loss / global_step |
|
|
|
|
| def evaluate(args, model, tokenizer, prefix=""): |
| |
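    # Loop to handle MNLI double evaluation (matched, mis-matched)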
| eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) |
| eval_outputs_dirs = (args.output_dir, args.output_dir + "/MM") if args.task_name == "mnli" else (args.output_dir,) |
|
|
| results = {} |
| for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): |
| eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) |
|
|
| if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: |
| os.makedirs(eval_output_dir) |
|
|
| args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) |
| |
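        # Note that DistributedSampler samples randomly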
| eval_sampler = SequentialSampler(eval_dataset) |
| eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) |
|
|
| |
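        # Multi-GPU eval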
| if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): |
| model = nn.DataParallel(model) |
|
|
| |
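        # Eval!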
| logger.info("***** Running evaluation {} *****".format(prefix)) |
| logger.info(" Num examples = %d", len(eval_dataset)) |
| logger.info(" Batch size = %d", args.eval_batch_size) |
| eval_loss = 0.0 |
| nb_eval_steps = 0 |
| preds = None |
| out_label_ids = None |
|
|
| |
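        # Global TopK: cache for the globally computed score threshold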
| if args.global_topk: |
| threshold_mem = None |
|
|
| for batch in tqdm(eval_dataloader, desc="Evaluating"): |
| model.eval() |
| batch = tuple(t.to(args.device) for t in batch) |
|
|
| with torch.no_grad(): |
| inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} |
| if args.model_type != "distilbert": |
| inputs["token_type_ids"] = ( |
| batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None |
| ) |
| if "masked" in args.model_type: |
| inputs["threshold"] = args.final_threshold |
| if args.global_topk: |
| if threshold_mem is None: |
| concat = torch.cat( |
| [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] |
| ) |
| n = concat.numel() |
| kth = max(n - (int(n * args.final_threshold) + 1), 1) |
| threshold_mem = concat.kthvalue(kth).values.item() |
| inputs["threshold"] = threshold_mem |
| outputs = model(**inputs) |
| tmp_eval_loss, logits = outputs[:2] |
|
|
| eval_loss += tmp_eval_loss.mean().item() |
| nb_eval_steps += 1 |
| if preds is None: |
| preds = logits.detach().cpu().numpy() |
| out_label_ids = inputs["labels"].detach().cpu().numpy() |
| else: |
| preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) |
| out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) |
|
|
        eval_loss = eval_loss / nb_eval_steps
        entropy = None
        if args.output_mode == "classification":
| from scipy.special import softmax |
|
|
| probs = softmax(preds, axis=-1) |
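            # Exponentiated mean entropy of the predicted class distributions (reported below as "eval_avg_entropy")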
| entropy = np.exp((-probs * np.log(probs)).sum(axis=-1).mean()) |
| preds = np.argmax(preds, axis=1) |
| elif args.output_mode == "regression": |
| preds = np.squeeze(preds) |
| result = compute_metrics(eval_task, preds, out_label_ids) |
| results.update(result) |
| if entropy is not None: |
| result["eval_avg_entropy"] = entropy |
|
|
| output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") |
| with open(output_eval_file, "w") as writer: |
| logger.info("***** Eval results {} *****".format(prefix)) |
| for key in sorted(result.keys()): |
| logger.info(" %s = %s", key, str(result[key])) |
| writer.write("%s = %s\n" % (key, str(result[key]))) |
|
|
| return results |
|
|
|
|
| def load_and_cache_examples(args, task, tokenizer, evaluate=False): |
| if args.local_rank not in [-1, 0] and not evaluate: |
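        # Make sure only the first process in distributed training processes the dataset; the others will use the cache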
| torch.distributed.barrier() |
|
|
| processor = processors[task]() |
| output_mode = output_modes[task] |
| |
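    # Load data features from the cache or from the dataset file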
| cached_features_file = os.path.join( |
| args.data_dir, |
| "cached_{}_{}_{}_{}".format( |
| "dev" if evaluate else "train", |
| list(filter(None, args.model_name_or_path.split("/"))).pop(), |
| str(args.max_seq_length), |
| str(task), |
| ), |
| ) |
| if os.path.exists(cached_features_file) and not args.overwrite_cache: |
| logger.info("Loading features from cached file %s", cached_features_file) |
| features = torch.load(cached_features_file) |
| else: |
| logger.info("Creating features from dataset file at %s", args.data_dir) |
| label_list = processor.get_labels() |
| if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]: |
| |
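            # HACK: label indices are swapped in the RoBERTa pretrained model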
| label_list[1], label_list[2] = label_list[2], label_list[1] |
| examples = ( |
| processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) |
| ) |
| features = convert_examples_to_features( |
| examples, |
| tokenizer, |
| max_length=args.max_seq_length, |
| label_list=label_list, |
| output_mode=output_mode, |
| ) |
| if args.local_rank in [-1, 0]: |
| logger.info("Saving features into cached file %s", cached_features_file) |
| torch.save(features, cached_features_file) |
|
|
| if args.local_rank == 0 and not evaluate: |
| torch.distributed.barrier() |
|
|
| |
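    # Convert to Tensors and build dataset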
| all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) |
| all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) |
| all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) |
| if output_mode == "classification": |
| all_labels = torch.tensor([f.label for f in features], dtype=torch.long) |
| elif output_mode == "regression": |
| all_labels = torch.tensor([f.label for f in features], dtype=torch.float) |
|
|
| dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) |
| return dataset |
|
|
|
|
| def main(): |
| parser = argparse.ArgumentParser() |
|
|
| |
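    # Required parameters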
| parser.add_argument( |
| "--data_dir", |
| default=None, |
| type=str, |
| required=True, |
| help="The input data dir. Should contain the .tsv files (or other data files) for the task.", |
| ) |
| parser.add_argument( |
| "--model_type", |
| default=None, |
| type=str, |
| required=True, |
| help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), |
| ) |
| parser.add_argument( |
| "--model_name_or_path", |
| default=None, |
| type=str, |
| required=True, |
| help="Path to pretrained model or model identifier from huggingface.co/models", |
| ) |
| parser.add_argument( |
| "--task_name", |
| default=None, |
| type=str, |
| required=True, |
| help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), |
| ) |
| parser.add_argument( |
| "--output_dir", |
| default=None, |
| type=str, |
| required=True, |
| help="The output directory where the model predictions and checkpoints will be written.", |
| ) |
| |
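    # Other parameters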
| parser.add_argument( |
| "--config_name", |
| default="", |
| type=str, |
| help="Pretrained config name or path if not the same as model_name", |
| ) |
| parser.add_argument( |
| "--tokenizer_name", |
| default="", |
| type=str, |
| help="Pretrained tokenizer name or path if not the same as model_name", |
| ) |
| parser.add_argument( |
| "--cache_dir", |
| default="", |
| type=str, |
| help="Where do you want to store the pre-trained models downloaded from huggingface.co", |
| ) |
| parser.add_argument( |
| "--max_seq_length", |
| default=128, |
| type=int, |
| help=( |
| "The maximum total input sequence length after tokenization. Sequences longer " |
| "than this will be truncated, sequences shorter will be padded." |
| ), |
| ) |
| parser.add_argument("--do_train", action="store_true", help="Whether to run training.") |
| parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") |
| parser.add_argument( |
| "--evaluate_during_training", |
| action="store_true", |
| help="Run evaluation during training at each logging step.", |
| ) |
| parser.add_argument( |
| "--do_lower_case", |
| action="store_true", |
| help="Set this flag if you are using an uncased model.", |
| ) |
|
|
| parser.add_argument( |
| "--per_gpu_train_batch_size", |
| default=8, |
| type=int, |
| help="Batch size per GPU/CPU for training.", |
| ) |
| parser.add_argument( |
| "--per_gpu_eval_batch_size", |
| default=8, |
| type=int, |
| help="Batch size per GPU/CPU for evaluation.", |
| ) |
| parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") |
|
|
| |
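    # Pruning parameters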
| parser.add_argument( |
| "--mask_scores_learning_rate", |
| default=1e-2, |
| type=float, |
        help="The initial Adam learning rate for the mask scores.",
| ) |
| parser.add_argument( |
| "--initial_threshold", default=1.0, type=float, help="Initial value of the threshold (for scheduling)." |
| ) |
| parser.add_argument( |
| "--final_threshold", default=0.7, type=float, help="Final value of the threshold (for scheduling)." |
| ) |
| parser.add_argument( |
| "--initial_warmup", |
| default=1, |
| type=int, |
        help=(
            "Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays "
            "at its `initial_threshold` value (sparsity schedule)."
        ),
| ) |
| parser.add_argument( |
| "--final_warmup", |
| default=2, |
| type=int, |
        help=(
            "Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays "
            "at its `final_threshold` value (sparsity schedule)."
        ),
| ) |
|
|
| parser.add_argument( |
| "--pruning_method", |
| default="topK", |
| type=str, |
| help=( |
| "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," |
| " sigmoied_threshold = Soft movement pruning)." |
| ), |
| ) |
| parser.add_argument( |
| "--mask_init", |
| default="constant", |
| type=str, |
| help="Initialization method for the mask scores. Choices: constant, uniform, kaiming.", |
| ) |
| parser.add_argument( |
| "--mask_scale", default=0.0, type=float, help="Initialization parameter for the chosen initialization method." |
| ) |
|
|
| parser.add_argument("--regularization", default=None, help="Add L0 or L1 regularization to the mask scores.") |
    parser.add_argument(
        "--final_lambda",
        default=0.0,
        type=float,
        help="Regularization intensity (used in conjunction with `regularization`).",
    )
|
|
| parser.add_argument("--global_topk", action="store_true", help="Global TopK on the Scores.") |
| parser.add_argument( |
| "--global_topk_frequency_compute", |
| default=25, |
| type=int, |
| help="Frequency at which we compute the TopK global threshold.", |
| ) |
|
|
| |
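    # Distillation parameters (optional)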
| parser.add_argument( |
| "--teacher_type", |
| default=None, |
| type=str, |
| help=( |
| "Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for" |
| " distillation." |
| ), |
| ) |
| parser.add_argument( |
| "--teacher_name_or_path", |
| default=None, |
| type=str, |
| help="Path to the already fine-tuned teacher model. Only for distillation.", |
| ) |
| parser.add_argument( |
| "--alpha_ce", default=0.5, type=float, help="Cross entropy loss linear weight. Only for distillation." |
| ) |
| parser.add_argument( |
| "--alpha_distil", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." |
| ) |
| parser.add_argument( |
| "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." |
| ) |
|
|
| parser.add_argument( |
| "--gradient_accumulation_steps", |
| type=int, |
| default=1, |
| help="Number of updates steps to accumulate before performing a backward/update pass.", |
| ) |
| parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") |
| parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") |
| parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") |
| parser.add_argument( |
| "--num_train_epochs", |
| default=3.0, |
| type=float, |
| help="Total number of training epochs to perform.", |
| ) |
| parser.add_argument( |
| "--max_steps", |
| default=-1, |
| type=int, |
| help="If > 0: set total number of training steps to perform. Override num_train_epochs.", |
| ) |
| parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") |
|
|
| parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") |
| parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") |
| parser.add_argument( |
| "--eval_all_checkpoints", |
| action="store_true", |
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number",
| ) |
| parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") |
| parser.add_argument( |
| "--overwrite_output_dir", |
| action="store_true", |
| help="Overwrite the content of the output directory", |
| ) |
| parser.add_argument( |
| "--overwrite_cache", |
| action="store_true", |
| help="Overwrite the cached training and evaluation sets", |
| ) |
| parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") |
|
|
| parser.add_argument( |
| "--fp16", |
| action="store_true", |
| help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", |
| ) |
| parser.add_argument( |
| "--fp16_opt_level", |
| type=str, |
| default="O1", |
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
| ) |
| parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") |
|
|
| args = parser.parse_args() |
|
|
| |
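    # Regularization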
| if args.regularization == "null": |
| args.regularization = None |
|
|
| if ( |
| os.path.exists(args.output_dir) |
| and os.listdir(args.output_dir) |
| and args.do_train |
| and not args.overwrite_output_dir |
| ): |
| raise ValueError( |
| f"Output directory ({args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to" |
| " overcome." |
| ) |
|
|
| |
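    # Setup CUDA, GPU & distributed training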
| if args.local_rank == -1 or args.no_cuda: |
| device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") |
| args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() |
| else: |
| torch.cuda.set_device(args.local_rank) |
| device = torch.device("cuda", args.local_rank) |
| torch.distributed.init_process_group(backend="nccl") |
| args.n_gpu = 1 |
| args.device = device |
|
|
| |
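    # Setup logging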
| logging.basicConfig( |
| format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", |
| datefmt="%m/%d/%Y %H:%M:%S", |
| level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, |
| ) |
| logger.warning( |
| "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", |
| args.local_rank, |
| device, |
| args.n_gpu, |
| bool(args.local_rank != -1), |
| args.fp16, |
| ) |
|
|
| |
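    # Set seed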
| set_seed(args) |
|
|
| |
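    # Prepare GLUE task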
| args.task_name = args.task_name.lower() |
| if args.task_name not in processors: |
| raise ValueError("Task not found: %s" % (args.task_name)) |
| processor = processors[args.task_name]() |
| args.output_mode = output_modes[args.task_name] |
| label_list = processor.get_labels() |
| num_labels = len(label_list) |
|
|
| |
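    # Load pretrained model and tokenizer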
| if args.local_rank not in [-1, 0]: |
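        # Make sure only the first process in distributed training will download model & vocab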
| torch.distributed.barrier() |
|
|
| args.model_type = args.model_type.lower() |
| config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] |
| config = config_class.from_pretrained( |
| args.config_name if args.config_name else args.model_name_or_path, |
| num_labels=num_labels, |
| finetuning_task=args.task_name, |
| cache_dir=args.cache_dir if args.cache_dir else None, |
| pruning_method=args.pruning_method, |
| mask_init=args.mask_init, |
| mask_scale=args.mask_scale, |
| ) |
| tokenizer = tokenizer_class.from_pretrained( |
| args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, |
| cache_dir=args.cache_dir if args.cache_dir else None, |
| do_lower_case=args.do_lower_case, |
| ) |
| model = model_class.from_pretrained( |
| args.model_name_or_path, |
| from_tf=bool(".ckpt" in args.model_name_or_path), |
| config=config, |
| cache_dir=args.cache_dir if args.cache_dir else None, |
| ) |
|
|
| if args.teacher_type is not None: |
| assert args.teacher_name_or_path is not None |
| assert args.alpha_distil > 0.0 |
| assert args.alpha_distil + args.alpha_ce > 0.0 |
| teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] |
| teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) |
| teacher = teacher_model_class.from_pretrained( |
| args.teacher_name_or_path, |
| from_tf=False, |
| config=teacher_config, |
| cache_dir=args.cache_dir if args.cache_dir else None, |
| ) |
| teacher.to(args.device) |
| else: |
| teacher = None |
|
|
| if args.local_rank == 0: |
| torch.distributed.barrier() |
|
|
| model.to(args.device) |
|
|
| logger.info("Training/evaluation parameters %s", args) |
|
|
| |
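    # Training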
| if args.do_train: |
| train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) |
| global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) |
| logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) |
|
|
| |
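    # Saving best-practices: if you use default names for the model, you can reload it with from_pretrained()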
| if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): |
| logger.info("Saving model checkpoint to %s", args.output_dir) |
| |
| |
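        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`.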
| model_to_save = ( |
| model.module if hasattr(model, "module") else model |
| ) |
| model_to_save.save_pretrained(args.output_dir) |
| tokenizer.save_pretrained(args.output_dir) |
|
|
| |
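        # Good practice: save your training arguments together with the trained model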
| torch.save(args, os.path.join(args.output_dir, "training_args.bin")) |
|
|
| |
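        # Load the trained model and vocabulary that you have fine-tuned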
| model = model_class.from_pretrained(args.output_dir) |
| tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) |
| model.to(args.device) |
|
|
| |
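    # Evaluation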
| results = {} |
| if args.do_eval and args.local_rank in [-1, 0]: |
| tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) |
| checkpoints = [args.output_dir] |
| if args.eval_all_checkpoints: |
| checkpoints = [ |
| os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) |
| ] |
|
|
| logger.info("Evaluate the following checkpoints: %s", checkpoints) |
| for checkpoint in checkpoints: |
| global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" |
| prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" |
|
|
| model = model_class.from_pretrained(checkpoint) |
| model.to(args.device) |
| result = evaluate(args, model, tokenizer, prefix=prefix) |
| result = {k + "_{}".format(global_step): v for k, v in result.items()} |
| results.update(result) |
|
|
| return results |
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|