#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The Microsoft and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for TAPEX on table-based question answering tasks.
Adapted from script: https://github.com/huggingface/transformers/blob/master/examples/pytorch/summarization/run_summarization.py
"""

import logging
import os
import sys
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from functools import partial
from typing import List, Optional

import nltk
import numpy as np
import pandas as pd
from datasets import load_dataset
from filelock import FileLock
from wikisql_utils import _TYPE_CONVERTER, retrieve_wikisql_query_answer_tapas

import transformers
from transformers import (
    AutoConfig,
    BartForConditionalGeneration,
    DataCollatorForSeq2Seq,
    HfArgumentParser,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    TapexTokenizer,
    set_seed,
)
from transformers.file_utils import is_offline_mode
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

logger = logging.getLogger(__name__)

try:
    nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
    if is_offline_mode():
        raise LookupError(
            "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
        )
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Pretrained tokenizer name or path if not the same as model_name. "
                "By default we use the BART-large tokenizer for TAPEX-large."
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="wikisql", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "An optional input evaluation data file to evaluate the metrics (denotation accuracy) on "
                "(a jsonlines or csv file)."
            )
        },
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "An optional input test data file to evaluate the metrics (denotation accuracy) on "
                "(a jsonlines or csv file)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the model's maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
                "which is used during ``evaluate`` and ``predict``."
            )
        },
    )
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={
            "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
        if self.val_max_target_length is None:
            self.val_max_target_length = self.max_target_length


def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log a small summary of the run on each process:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity of the Transformers logger to info (on the main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and the `table` column
    # for the corresponding table.
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        if data_args.test_file is not None:
            data_files["test"] = data_args.test_file
            extension = data_args.test_file.split(".")[-1]
        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # IMPORTANT: the initial BART model's decoding is penalized by no_repeat_ngram_size, which is
    # harmful here (repeated cell values are legitimate answers), so we disable it for TAPEX.
    config.no_repeat_ngram_size = 0
    config.max_length = 1024
    config.early_stopping = False

    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
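
    # Note: TapexTokenizer flattens each table together with the query into a single
    # sequence. A minimal sketch of the encoding it produces (exact special tokens and
    # casing are tokenizer implementation details, not guaranteed here):
    #
    #   "<query> col : header_1 | header_2 row 1 : cell_11 | cell_12 row 2 : ..."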

    # load BART-based TAPEX model (default tapex-large)
    model = BartForConditionalGeneration.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    # Preprocessing the datasets.
    # We need to tokenize inputs and targets.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    elif training_args.do_eval:
        column_names = datasets["validation"].column_names
    elif training_args.do_predict:
        column_names = datasets["test"].column_names
    else:
        logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
        return

    # Temporarily set max_target_length for training.
    max_target_length = data_args.max_target_length
    padding = "max_length" if data_args.pad_to_max_length else False

    if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
        logger.warning(
            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for "
            f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
        )

    def preprocess_tableqa_function(examples, is_training=False):
        """
        The is_training flag is used to identify whether we can use the answer
        supervision to truncate the table content when required.
        """

        # this function is specific to WikiSQL, since the util function needs this data
        # structure to retrieve the WikiSQL answer for each question
        def _convert_table_types(_table):
            """Runs the type converter over the table cells."""
            ret_table = deepcopy(_table)
            types = ret_table["types"]
            ret_table["real_rows"] = ret_table["rows"]
            typed_rows = []
            for row in ret_table["rows"]:
                typed_row = []
                for column, cell_value in enumerate(row):
                    typed_row.append(_TYPE_CONVERTER[types[column]](cell_value))
                typed_rows.append(typed_row)
            ret_table["rows"] = typed_rows
            return ret_table
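
        # Illustrative example (hedged -- the actual converter behavior lives in
        # wikisql_utils._TYPE_CONVERTER): assuming column types ["text", "real"], a row
        # ["Germany", "82.1"] would roughly become ["Germany", 82.1], so that numeric
        # comparisons in the SQL answer retrieval operate on typed values.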

        questions = [question.lower() for question in examples["question"]]
        example_tables = examples["table"]
        example_sqls = examples["sql"]
        tables = [
            pd.DataFrame.from_records(example_table["rows"], columns=example_table["header"])
            for example_table in example_tables
        ]

        # using tapas utils to obtain the wikisql answer for each (table, sql) pair
        answers = []
        for example_sql, example_table in zip(example_sqls, example_tables):
            tapas_table = _convert_table_types(example_table)
            answer_list: List[str] = retrieve_wikisql_query_answer_tapas(tapas_table, example_sql)
            answers.append(answer_list)

        # IMPORTANT: we cannot pass answers during evaluation; answers passed during
        # training are used to truncate large tables in the train set!
        if is_training:
            model_inputs = tokenizer(
                table=tables,
                query=questions,
                answer=answers,
                max_length=data_args.max_source_length,
                padding=padding,
                truncation=True,
            )
        else:
            model_inputs = tokenizer(
                table=tables, query=questions, max_length=data_args.max_source_length, padding=padding, truncation=True
            )

        labels = tokenizer(
            answer=[", ".join(answer) for answer in answers],
            max_length=max_target_length,
            padding=padding,
            truncation=True,
        )

        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100
        # when we want to ignore padding in the loss.
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
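
        # e.g. for BART (whose pad_token_id is 1 -- an assumption about the checkpoint in
        # use), a padded label sequence [0, 9497, 2, 1, 1] becomes [0, 9497, 2, -100, -100];
        # -100 is the index PyTorch's cross-entropy loss ignores by default.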

        model_inputs["labels"] = labels["input_ids"]

        return model_inputs

    # in training, we can use the answer as extra information to truncate large tables
    preprocess_tableqa_function_training = partial(preprocess_tableqa_function, is_training=True)

    if training_args.do_train:
        if "train" not in datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        train_dataset = train_dataset.map(
            preprocess_tableqa_function_training,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
        )

    if training_args.do_eval:
        max_target_length = data_args.val_max_target_length
        if "validation" not in datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
        eval_dataset = eval_dataset.map(
            preprocess_tableqa_function,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
        )

    if training_args.do_predict:
        max_target_length = data_args.val_max_target_length
        if "test" not in datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
        predict_dataset = predict_dataset.map(
            preprocess_tableqa_function,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
        )

    # Data collator
    label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
    data_collator = DataCollatorForSeq2Seq(
        tokenizer,
        model=model,
        label_pad_token_id=label_pad_token_id,
        pad_to_multiple_of=8 if training_args.fp16 else None,
    )
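
    # Note: padding to a multiple of 8 under fp16 is a throughput heuristic (NVIDIA tensor
    # cores prefer dimensions divisible by 8); it does not change model outputs.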

    def postprocess_text(preds, labels):
        preds = [pred.strip() for pred in preds]
        labels = [label.strip() for label in labels]

        return preds, labels

    def compute_metrics(eval_preds):
        preds, labels = eval_preds
        if isinstance(preds, tuple):
            preds = preds[0]
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
        if data_args.ignore_pad_token_for_loss:
            # Replace -100 in the labels as we can't decode them.
            labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

        # Some simple post-processing
        decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)

        delimiter = ", "

        # define example evaluation
        def evaluate_example(predict_str: str, ground_str: str):
            predict_spans = predict_str.split(delimiter)
            ground_spans = ground_str.split(delimiter)
            predict_values = defaultdict(int)
            ground_values = defaultdict(int)
            for span in predict_spans:
                try:
                    predict_values[float(span)] += 1
                except ValueError:
                    predict_values[span.strip()] += 1
            for span in ground_spans:
                try:
                    ground_values[float(span)] += 1
                except ValueError:
                    ground_values[span.strip()] += 1
            is_correct = predict_values == ground_values
            return is_correct
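
        # Worked example (illustrative): "2, berlin" vs. "berlin, 2" both reduce to the
        # multiset {2.0: 1, "berlin": 1}, so span order does not matter; "2, 2" vs. "2"
        # differ in counts and are judged incorrect.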

        def get_denotation_accuracy(predictions: List[str], references: List[str]):
            assert len(predictions) == len(references)
            correct_num = 0
            for predict_str, ground_str in zip(predictions, references):
                is_correct = evaluate_example(predict_str.lower(), ground_str.lower())
                if is_correct:
                    correct_num += 1
            return correct_num / len(predictions)

        accuracy = get_denotation_accuracy(decoded_preds, decoded_labels)
        result = {"denotation_accuracy": accuracy}

        return result

    # Initialize our Trainer
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics if training_args.predict_with_generate else None,
    )

    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(
            max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix="eval"
        )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        predict_results = trainer.predict(
            predict_dataset,
            metric_key_prefix="predict",
            max_length=data_args.val_max_target_length,
            num_beams=data_args.num_beams,
        )
        metrics = predict_results.metrics
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        if trainer.is_world_process_zero():
            if training_args.predict_with_generate:
                predictions = tokenizer.batch_decode(
                    predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                predictions = [pred.strip() for pred in predictions]
                output_prediction_file = os.path.join(training_args.output_dir, "tapex_predictions.txt")
                with open(output_prediction_file, "w") as writer:
                    writer.write("\n".join(predictions))

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()