| """ Fine-tuning the library models for named entity recognition.""" |

import logging
import os
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import classification_report, f1_score, precision_score, recall_score
from utils_ner import Split, TFTokenClassificationDataset, TokenClassificationTask

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    TFAutoModelForTokenClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine-tune on (e.g. NER, POS, etc.)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
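    # Parse the command-line arguments into the three dataclasses above; passing
    # --help to the script prints every available option (including all
    # TFTrainingArguments).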
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available task classes are: {TokenClassificationTask.__subclasses__()}"
        )
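
    # Setup logging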
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        "n_replicas: %s, distributed training: %s, 16-bits training: %s",
        training_args.n_replicas,
        bool(training_args.n_replicas > 1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)
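
    # Prepare the token classification task: resolve the label list and build the
    # id-to-label / label-to-id mappings used by the model config.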
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
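
    # Load the pretrained config, tokenizer, and model. The .from_pretrained methods
    # download from huggingface.co (or read from cache_dir) when given a model
    # identifier, or load from disk when given a local path.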
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForTokenClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
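
    # Get datasets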
    train_dataset = (
        TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
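
    # Map logits of shape (batch_size, seq_len, num_labels) back to per-token label
    # strings, skipping positions whose gold label id is -100 (special tokens and
    # padding, which must not be scored).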
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[List[str]], List[List[str]]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != -100:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list
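
    # seqeval computes entity-level precision/recall/F1 from the aligned label
    # sequences.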
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)

        return {
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
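
    # Initialize our Trainer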
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset.get_dataset() if train_dataset else None,
        eval_dataset=eval_dataset.get_dataset() if eval_dataset else None,
        compute_metrics=compute_metrics,
    )
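
    # Training: fine-tune, then save the final model and tokenizer to output_dir
    # so they can be reloaded later with .from_pretrained().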
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
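
    # Evaluation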
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(" %s = %s", key, value)
                writer.write("%s = %s\n" % (key, value))

        results.update(result)
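
    # Predict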
    if training_args.do_predict:
        test_dataset = TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset.get_dataset())
        preds_list, labels_list = align_predictions(predictions, label_ids)
        report = classification_report(labels_list, preds_list)

        logger.info("\n%s", report)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")

        with open(output_test_results_file, "w") as writer:
            writer.write("%s\n" % report)
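
        # Write per-token predictions alongside the original test file, one
        # "token label" pair per line, preserving -DOCSTART- markers and
        # sentence boundaries.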
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")

        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                example_id = 0

                for line in f:
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        writer.write(line)

                        if not preds_list[example_id]:
                            example_id += 1
                    elif preds_list[example_id]:
                        output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"

                        writer.write(output_line)
                    else:
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    return results


if __name__ == "__main__":
    main()