| import argparse |
| import json |
| import math |
| import os |
| import random |
| from dataclasses import dataclass |
| from itertools import chain |
| from pathlib import Path |
| from typing import Optional, Union |
|
|
| import datasets |
| import torch |
| from datasets import load_dataset |
| from torch.utils.data import DataLoader |
| from tqdm.auto import tqdm |
|
|
| import evaluate |
| import transformers |
| from accelerate import Accelerator |
| from accelerate.utils import set_seed |
| from huggingface_hub import Repository |
| from transformers import ( |
| CONFIG_MAPPING, |
| MODEL_MAPPING, |
| AutoConfig, |
| AutoModelForMultipleChoice, |
| AutoTokenizer, |
| PreTrainedTokenizerBase, |
| SchedulerType, |
| default_data_collator, |
| get_scheduler, |
| ) |
| from transformers.utils import PaddingStrategy, check_min_version, get_full_repo_name, send_example_telemetry |
|
|
|
|
| |
# Config classes registered for multiple-choice-capable models; their
# `model_type` strings become the valid values for --model_type.
MODEL_CONFIG_CLASSES = [config_class for config_class in MODEL_MAPPING.keys()]
MODEL_TYPES = tuple(config_class.model_type for config_class in MODEL_CONFIG_CLASSES)
|
|
|
|
def parse_args():
    """Parse command-line arguments for multiple-choice test-set evaluation.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Finetune a transformers model on a multiple choice task")
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help="The name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The configuration name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--test_file", type=str, default=None, help="A csv or a json file containing the test data."
    )
    parser.add_argument(
        "--max_seq_length",
        type=int,
        default=128,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            # Fixed typo: the flag is --pad_to_max_length, not --pad_to_max_lengh.
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--per_device_test_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the test dataloader.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Gradient accumulation steps."
    )
    # Bug fix: this was declared with no action, so the option required a string
    # value instead of acting as a boolean flag. It is consumed elsewhere as
    # `use_fast=not args.use_slow_tokenizer`, so it must be a store_true flag
    # (default False keeps the previous effective behavior: fast tokenizer).
    parser.add_argument(
        "--use_slow_tokenizer",
        action="store_true",
        help="If passed, will use a slow tokenizer (not backed by the Tokenizers library).",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--model_type",
        type=str,
        default=None,
        help="Model type to use if training from scratch.",
        choices=MODEL_TYPES,
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Activate debug mode and run training only with a subset of data.",
    )

    args = parser.parse_args()
    return args
|
|
|
|
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads multiple-choice features into a batch.
    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
            if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
            lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        # Pull the labels out first so they are not run through the padder.
        label_key = "label" if "label" in features[0].keys() else "labels"
        labels = [example.pop(label_key) for example in features]

        num_examples = len(features)
        num_choices = len(features[0]["input_ids"])

        # Flatten to one entry per (example, choice) pair so the tokenizer
        # can pad all choices of all examples together.
        flattened = []
        for example in features:
            for choice_idx in range(num_choices):
                flattened.append({key: values[choice_idx] for key, values in example.items()})

        batch = self.tokenizer.pad(
            flattened,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Restore the (batch_size, num_choices, seq_len) layout and re-attach labels.
        batch = {key: tensor.view(num_examples, num_choices, -1) for key, tensor in batch.items()}
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
|
|
|
|
def main():
    """Evaluate a pretrained multiple-choice model on the test split of a SWAG-style dataset.

    Loads the data, tokenizes each context with its four candidate endings,
    runs inference under Accelerate, and prints accuracy plus weighted
    precision / recall / F1.
    """
    args = parse_args()

    # Anonymized usage telemetry for the HF examples.
    send_example_telemetry("run_swag_no_trainer", args)

    accelerator_log_kwargs = {}
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)

    # Seed all RNGs for reproducibility when requested.
    if args.seed is not None:
        set_seed(args.seed)

    accelerator.wait_for_everyone()

    # Either download a dataset from the hub or load a local csv/json test
    # file; the file extension doubles as the `datasets` loader name.
    if args.dataset_name is not None:
        raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
    else:
        data_files = {}
        if args.test_file is not None:
            data_files["test"] = args.test_file
            extension = args.test_file.split(".")[-1]
        raw_datasets = load_dataset(extension, data_files=data_files)

    # Debug mode: trim every split to 100 examples for a quick smoke run.
    if args.debug:
        for split in raw_datasets.keys():
            raw_datasets[split] = raw_datasets[split].select(range(100))

    column_names = raw_datasets["test"].column_names

    # SWAG column layout: a context sentence, a question header, four endings.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    label_column_name = "label" if "label" in column_names else "labels"

    # Resolve the model config: an explicit --config_name takes precedence
    # over the model path; with neither, build a fresh config for --model_type.
    if args.config_name:
        # Bug fix: this branch previously loaded from args.model_name_or_path,
        # silently ignoring an explicitly passed --config_name.
        config = AutoConfig.from_pretrained(args.config_name)
    elif args.model_name_or_path:
        config = AutoConfig.from_pretrained(args.model_name_or_path)
    else:
        config = CONFIG_MAPPING[args.model_type]()

    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
    elif args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if args.model_name_or_path:
        model = AutoModelForMultipleChoice.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
        )
    else:
        model = AutoModelForMultipleChoice.from_config(config)

    # Only grow the embedding matrix when the tokenizer has extra tokens;
    # never shrink it, which could drop pretrained rows.
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if len(tokenizer) > embedding_size:
        model.resize_token_embeddings(len(tokenizer))

    # Pad in the tokenizer when --pad_to_max_length is set; otherwise pad
    # dynamically per batch in the collator.
    padding = "max_length" if args.pad_to_max_length else False

    def preprocess_function(examples):
        # Repeat each context once per candidate ending so every
        # (context, ending) pair becomes one tokenizer input.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        labels = examples[label_column_name]

        # Flatten to single lists for tokenization.
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            max_length=args.max_seq_length,
            padding=padding,
            truncation=True,
        )
        # Un-flatten: regroup the encodings into lists of 4 choices per example.
        tokenized_inputs = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
        tokenized_inputs["labels"] = labels
        return tokenized_inputs

    # Tokenize on the main process first so other ranks reuse the cache.
    with accelerator.main_process_first():
        processed_datasets = raw_datasets.map(
            preprocess_function, batched=True, remove_columns=raw_datasets["test"].column_names
        )

    test_dataset = processed_datasets["test"]

    if args.pad_to_max_length:
        # Everything is already padded to max length, so the default collator
        # only needs to stack tensors.
        data_collator = default_data_collator
    else:
        # Dynamic padding; pad to a multiple of 8 under fp16 for Tensor Cores.
        data_collator = DataCollatorForMultipleChoice(
            tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)
        )

    test_dataloader = DataLoader(test_dataset, collate_fn=data_collator, batch_size=args.per_device_test_batch_size)

    device = accelerator.device
    model.to(device)

    # Let Accelerate wrap the model and dataloader for the current setup.
    model, test_dataloader = accelerator.prepare(
        model, test_dataloader
    )

    acc = evaluate.load("accuracy")
    prec = evaluate.load("precision")
    rec = evaluate.load("recall")
    f1 = evaluate.load("f1")

    model.eval()
    # Iterate the dataloader directly (instead of tqdm(enumerate(...))): the
    # unused step counter is gone and tqdm can show a proper progress total.
    for batch in tqdm(test_dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics drops the duplicated samples Accelerate adds for
        # even sharding across processes.
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        for metric in (acc, prec, rec, f1):
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

    acc_metric = acc.compute()
    prec_metric = prec.compute(average="weighted")
    rec_metric = rec.compute(average="weighted")
    f1_metric = f1.compute(average="weighted")
    accelerator.print(args.model_name_or_path)
    accelerator.print(f"Accuracy: {acc_metric}, Precision:{prec_metric}, Recall:{rec_metric}, F1:{f1_metric}")
|
|
| if __name__ == "__main__": |
| main() |