| """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition""" |

import functools
import json
import logging
import os
import re
import sys
import warnings
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union

import datasets
import numpy as np
import torch
from torch.optim.lr_scheduler import LambdaLR
from datasets import DatasetDict, load_dataset, load_metric, load_from_disk

import bitsandbytes as bnb
import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForCTC,
    AutoProcessor,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Processor,
    set_seed,
)
from transformers.trainer_pt_utils import get_parameter_names
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.16.0.dev0")

require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")


logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    # dataclasses require a factory for mutable defaults, so wrap list defaults in one
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    tokenizer_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_dropout: float = field(
        default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: float = field(
        default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
    hidden_dropout: float = field(
        default=0.0,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    final_dropout: float = field(
        default=0.0,
        metadata={"help": "The dropout probability for the final projection layer."},
    )
    mask_time_prob: float = field(
        default=0.05,
        metadata={
            "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector "
            "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
            "vectors will be masked along the time axis."
        },
    )
    mask_time_length: int = field(
        default=10,
        metadata={"help": "Length of vector span to mask along the time axis."},
    )
    mask_feature_prob: float = field(
        default=0.0,
        metadata={
            "help": "Probability of each feature vector along the feature axis to be chosen as the start of the "
            "vector span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` "
            "feature bins will be masked along the feature axis."
        },
    )
    mask_feature_length: int = field(
        default=10,
        metadata={"help": "Length of vector span to mask along the feature axis."},
    )
    layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
    ctc_loss_reduction: Optional[str] = field(
        default="mean", metadata={"help": "The way the CTC loss should be reduced. Should be one of 'mean' or 'sum'."}
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    dataset_name: str = field(
        metadata={
            "help": "The name of the dataset to use (via the datasets library), or a local path ending in '/' "
            "to load a dataset that was saved with `datasets.save_to_disk`."
        }
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: str = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train+validation'"
        },
    )
    eval_split_name: str = field(
        default="test",
        metadata={
            "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    text_column_name: str = field(
        default="text",
        metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
            "value if set."
        },
    )
    chars_to_ignore: Optional[List[str]] = list_field(
        default=None,
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
    eval_metrics: List[str] = list_field(
        default=["wer", "cer"],
        metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
    )
    max_duration_in_seconds: float = field(
        default=20.0,
        metadata={"help": "Filter out audio files that are longer than `max_duration_in_seconds` seconds."},
    )
    min_duration_in_seconds: float = field(
        default=0.0, metadata={"help": "Filter out audio files that are shorter than `min_duration_in_seconds` seconds."}
    )
    preprocessing_only: bool = field(
        default=False,
        metadata={
            "help": "Whether to only do data preprocessing and skip training. "
            "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
            "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
            "so that the cached datasets can consequently be loaded in distributed training."
        },
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "If :obj:`True`, will use the token generated when running "
            ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
        },
    )
    unk_token: str = field(
        default="[UNK]",
        metadata={"help": "The unk token for the tokenizer"},
    )
    pad_token: str = field(
        default="[PAD]",
        metadata={"help": "The padding token for the tokenizer"},
    )
    word_delimiter_token: str = field(
        default="|",
        metadata={"help": "The word delimiter token for the tokenizer"},
    )
    phoneme_language: Optional[str] = field(
        default=None,
        metadata={
            "help": "The target language that should be passed to the tokenizer for tokenization. Note that"
            " this is only relevant if the model classifies the input audio to a sequence of phonemes."
        },
    )


@dataclass
class ExtraArguments:
    """
    Additional training arguments that are not part of `TrainingArguments`.
    """

    bnb: bool = field(
        default=False,
        metadata={"help": "Whether to use bitsandbytes 8-bit Adam instead of torch.optim.AdamW."},
    )
    tristage_sched: bool = field(
        default=False,
        metadata={"help": "Whether to use a tri-stage LR scheduler (warmup, hold, decay; see the XLS-R paper). Requires `--max_steps`."},
    )
    wandb_project: Optional[str] = field(
        default=None,
        metadata={"help": "Name of the wandb project to log into."},
    )


@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.AutoProcessor`)
            The processor used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`'longest'`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
              different lengths).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the ``input_values`` to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        pad_to_multiple_of_labels (:obj:`int`, `optional`):
            If set will pad the ``labels`` to a multiple of the provided value.
    """

    processor: AutoProcessor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )

        # replace padding with -100 so these positions are ignored by the CTC loss
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
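

# Minimal usage sketch for the collator above (hypothetical values; assumes `processor`
# is a Wav2Vec2Processor and each dict was produced by `prepare_dataset` further below):
#
#   collator = DataCollatorCTCWithPadding(processor=processor)
#   batch = collator([
#       {"input_values": [0.10, -0.20, 0.30], "labels": [5, 2, 7]},
#       {"input_values": [0.05, 0.00], "labels": [3]},
#   ])
#   # batch["input_values"] -> float tensor of shape (2, 3), second row zero-padded
#   # batch["labels"]       -> tensor([[5, 2, 7], [3, -100, -100]])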


def get_tri_stage_schedule(
    optimizer, num_training_steps, ratios=(0.1, 0.4, 0.5), num_warmup_steps=None, num_hold_steps=None, start_ratio=0.01, end_ratio=0.05
):
    """Tri-stage learning-rate schedule (cf. the XLS-R paper): linear warmup, constant hold, linear decay."""
    assert (num_warmup_steps is None) == (num_hold_steps is None)
    if num_warmup_steps is None:
        num_warmup_steps = int(ratios[0] * num_training_steps)
        num_hold_steps = int(ratios[1] * num_training_steps)
    start_decay_step = num_warmup_steps + num_hold_steps
    num_decay_steps = num_training_steps - start_decay_step
    # warmup ramps the multiplier linearly from `start_ratio` to 1.0,
    # decay ramps it linearly from 1.0 down to `end_ratio`
    a_w, b_w = (1 - start_ratio) / num_warmup_steps, start_ratio
    a_d, b_d = (end_ratio - 1) / num_decay_steps, 1.0

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return a_w * float(current_step) + b_w
        if current_step < start_decay_step:
            return 1.0
        return max(end_ratio, a_d * float(current_step - start_decay_step) + b_d)

    return LambdaLR(optimizer, lr_lambda)
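

# Worked example for the schedule above (hypothetical numbers): with
# num_training_steps=10_000 and the default ratios, warmup covers steps 0-999, hold
# covers steps 1_000-4_999, and decay covers the remaining 5_000 steps. The multiplier
# applied to the base learning rate moves from 0.01 to 1.0 during warmup, stays at 1.0
# during hold, then falls linearly to 0.05:
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
#   scheduler = get_tri_stage_schedule(optimizer, num_training_steps=10_000)
#   # effective lr: ~3e-6 at step 0, 3e-4 during the hold phase, ~1.5e-5 at step 10_000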


def create_vocabulary_from_data(
    datasets: DatasetDict,
    word_delimiter_token: Optional[str] = None,
    unk_token: Optional[str] = None,
    pad_token: Optional[str] = None,
):
    # Given datasets with a "target_text" column, build a character-level vocabulary.
    def extract_all_chars(batch):
        all_text = " ".join(batch["target_text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocabs = datasets.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=datasets["train"].column_names,
    )

    # take the union of all unique characters in each dataset
    vocab_set = functools.reduce(
        lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
    )

    vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}

    # replace white space with the word delimiter token
    if word_delimiter_token is not None:
        vocab_dict[word_delimiter_token] = vocab_dict[" "]
        del vocab_dict[" "]

    # add unk and pad tokens at the end of the vocabulary
    if unk_token is not None:
        vocab_dict[unk_token] = len(vocab_dict)

    if pad_token is not None:
        vocab_dict[pad_token] = len(vocab_dict)

    return vocab_dict
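

# Illustrative output of `create_vocabulary_from_data` (hypothetical data): if the
# normalized transcripts only contain the text "ab ba", the returned mapping works out to
#   {"|": 0, "a": 1, "b": 2, "[UNK]": 3, "[PAD]": 4}
# i.e. the space character is re-keyed to the word delimiter token and the unk/pad
# tokens are appended at the end.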


def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, ExtraArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args, extra_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args, extra_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process a small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Route wandb logging to the requested project, if one was given.
    if extra_args.wandb_project is not None:
        os.environ["WANDB_PROJECT"] = extra_args.wandb_project

    # 1. First, let's load the dataset (either from the Hub or from a local `save_to_disk` directory).
    raw_datasets = DatasetDict()

    if training_args.do_train:
        if data_args.dataset_name.endswith("/"):
            # a trailing "/" means the dataset was saved locally with `datasets.save_to_disk`
            raw_datasets["train"] = load_from_disk(f"{data_args.dataset_name}/{data_args.train_split_name}")
        else:
            raw_datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=data_args.train_split_name,
                use_auth_token=data_args.use_auth_token,
            )

        if data_args.audio_column_name not in raw_datasets["train"].column_names:
            raise ValueError(
                f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
                "Make sure to set `--audio_column_name` to the correct audio column - one of "
                f"{', '.join(raw_datasets['train'].column_names)}."
            )

        if data_args.text_column_name not in raw_datasets["train"].column_names:
            raise ValueError(
                f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
                "Make sure to set `--text_column_name` to the correct text column - one of "
                f"{', '.join(raw_datasets['train'].column_names)}."
            )

        if data_args.max_train_samples is not None:
            raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if data_args.dataset_name.endswith("/"):
            raw_datasets["eval"] = load_from_disk(f"{data_args.dataset_name}/{data_args.eval_split_name}")
        else:
            raw_datasets["eval"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=data_args.eval_split_name,
                use_auth_token=data_args.use_auth_token,
            )

        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))

    # 2. We remove special characters from the transcripts: punctuation has no acoustic
    # correlate and only complicates CTC training.
    chars_to_ignore_regex = (
        f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
    )
    text_column_name = data_args.text_column_name

    def remove_special_characters(batch):
        if chars_to_ignore_regex is not None:
            batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
        else:
            batch["target_text"] = batch[text_column_name].lower() + " "
        return batch

    with training_args.main_process_first(desc="dataset map special characters removal"):
        raw_datasets = raw_datasets.map(
            remove_special_characters,
            remove_columns=[text_column_name],
            desc="remove special characters from datasets",
        )

    # save special tokens for the tokenizer
    word_delimiter_token = data_args.word_delimiter_token
    unk_token = data_args.unk_token
    pad_token = data_args.pad_token

    # 3. Next, let's load the config, as we might need it to create the tokenizer.
    config = AutoConfig.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
    )

    # 4. If no tokenizer is given, create the vocabulary of the model by extracting all
    # unique characters from the training and evaluation datasets and save it as
    # vocab.json in the output directory.
    tokenizer_name_or_path = model_args.tokenizer_name_or_path
    tokenizer_kwargs = {}
    if tokenizer_name_or_path is None:
        # save vocab in the training output dir
        tokenizer_name_or_path = training_args.output_dir

        vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")

        with training_args.main_process_first():
            if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
                os.remove(vocab_file)

        with training_args.main_process_first(desc="dataset map vocabulary creation"):
            if not os.path.isfile(vocab_file):
                os.makedirs(tokenizer_name_or_path, exist_ok=True)
                vocab_dict = create_vocabulary_from_data(
                    raw_datasets,
                    word_delimiter_token=word_delimiter_token,
                    unk_token=unk_token,
                    pad_token=pad_token,
                )

                # save vocab dict to be loaded into tokenizer
                with open(vocab_file, "w") as file:
                    json.dump(vocab_dict, file)

        # if the tokenizer has just been created, it is defined by `tokenizer_class`
        # if present in config, else by `model_type`
        tokenizer_kwargs = {
            "config": config if config.tokenizer_class is not None else None,
            "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
            "unk_token": unk_token,
            "pad_token": pad_token,
            "word_delimiter_token": word_delimiter_token,
        }

    # 5. Now we can instantiate the tokenizer and feature extractor.
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_name_or_path,
        use_auth_token=data_args.use_auth_token,
        **tokenizer_kwargs,
    )
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
    )

    # adapt config to the command-line hyperparameters
    config.update(
        {
            "feat_proj_dropout": model_args.feat_proj_dropout,
            "attention_dropout": model_args.attention_dropout,
            "hidden_dropout": model_args.hidden_dropout,
            "final_dropout": model_args.final_dropout,
            "mask_time_prob": model_args.mask_time_prob,
            "mask_time_length": model_args.mask_time_length,
            "mask_feature_prob": model_args.mask_feature_prob,
            "mask_feature_length": model_args.mask_feature_length,
            "gradient_checkpointing": training_args.gradient_checkpointing,
            "layerdrop": model_args.layerdrop,
            "ctc_loss_reduction": model_args.ctc_loss_reduction,
            "pad_token_id": tokenizer.pad_token_id,
            "vocab_size": len(tokenizer),
            "activation_dropout": model_args.activation_dropout,
        }
    )

    # create model
    model = AutoModelForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        config=config,
        use_auth_token=data_args.use_auth_token,
    )

    # freeze the convolutional feature encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    # 6. Now we preprocess the datasets: read the audio files as arrays and tokenize the targets.

    # make sure that the dataset decodes audio with the correct sampling rate
    dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
    if dataset_sampling_rate != feature_extractor.sampling_rate:
        raw_datasets = raw_datasets.cast_column(
            data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
        )

    # derive max & min input length (in samples) from the max & min duration in seconds
    max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
    min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
    audio_column_name = data_args.audio_column_name
    num_workers = data_args.preprocessing_num_workers

    # `phoneme_language` is only relevant if the model transcribes to phonemes
    phoneme_language = data_args.phoneme_language

    def prepare_dataset(batch):
        # load audio
        sample = batch[audio_column_name]

        inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
        batch["input_values"] = inputs.input_values[0]
        batch["length"] = len(batch["input_values"])

        # encode targets
        additional_kwargs = {}
        if phoneme_language is not None:
            additional_kwargs["phonemizer_lang"] = phoneme_language

        batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
        return batch
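
    # Illustrative shape of one prepared example (values are hypothetical):
    #   {"input_values": [0.01, -0.03, ...],  # raw waveform at feature_extractor.sampling_rate
    #    "length": 48000,                     # number of samples, used for length filtering below
    #    "labels": [7, 4, 31, ...]}           # character (or phoneme) ids from the tokenizer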

    with training_args.main_process_first(desc="dataset map preprocessing"):
        vectorized_datasets = raw_datasets.map(
            prepare_dataset,
            remove_columns=next(iter(raw_datasets.values())).column_names,
            num_proc=num_workers,
            desc="preprocess datasets",
        )

    def is_audio_in_length_range(length):
        return length > min_input_length and length < max_input_length

    # filter data that is shorter than min_input_length or longer than max_input_length
    vectorized_datasets = vectorized_datasets.filter(
        is_audio_in_length_range,
        num_proc=num_workers,
        input_columns=["length"],
    )

    # 7. Next, we can prepare the training.
    # Load the metrics (word error rate and/or character error rate) used for evaluation.
    eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}

    # For large datasets it is advised to run the preprocessing first in a non-distributed
    # setup with `--preprocessing_only` so that the cached datasets can later be loaded in
    # distributed training.
    if data_args.preprocessing_only:
        logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
        return

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id

        pred_str = tokenizer.batch_decode(pred_ids)
        # we do not want to group tokens when computing the reference strings
        label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)

        metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}

        return metrics

    # Now save everything so that a single processor can be created later
    if is_main_process(training_args.local_rank):
        # save feature extractor, tokenizer and config
        feature_extractor.save_pretrained(training_args.output_dir)
        tokenizer.save_pretrained(training_args.output_dir)
        config.save_pretrained(training_args.output_dir)

    try:
        processor = AutoProcessor.from_pretrained(training_args.output_dir)
    except (OSError, KeyError):
        warnings.warn(
            "Loading a processor from a feature extractor config that does not"
            " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
            " attribute to your `preprocessor_config.json` file to suppress this warning: "
            " `'processor_class': 'Wav2Vec2Processor'`",
            FutureWarning,
        )
        processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)

    # Instantiate custom data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor)

    # Set up the optimizer: exclude bias and LayerNorm weights from weight decay,
    # mirroring the Trainer's default grouping.
    decay_parameters = get_parameter_names(model, [torch.nn.LayerNorm])
    decay_parameters = [name for name in decay_parameters if "bias" not in name]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if n in decay_parameters],
            "weight_decay": training_args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if n not in decay_parameters],
            "weight_decay": 0.0,
        },
    ]
    if extra_args.bnb:
        # bitsandbytes 8-bit Adam keeps optimizer state in 8-bit and saves GPU memory
        optimizer = bnb.optim.Adam8bit(
            params=optimizer_grouped_parameters,
            lr=training_args.learning_rate,
            betas=(training_args.adam_beta1, training_args.adam_beta2),
            eps=training_args.adam_epsilon,
        )
    else:
        optimizer = torch.optim.AdamW(
            params=optimizer_grouped_parameters,
            lr=training_args.learning_rate,
            betas=(training_args.adam_beta1, training_args.adam_beta2),
            eps=training_args.adam_epsilon,
        )
    if extra_args.tristage_sched:
        # note: this relies on `--max_steps` being set; with the default of -1 the schedule is meaningless
        scheduler = get_tri_stage_schedule(optimizer, training_args.max_steps)
    else:
        # `None` lets the Trainer create its default scheduler from `--lr_scheduler_type`
        scheduler = None
    optimizers = (optimizer, scheduler)

    trainer = Trainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
        eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
        tokenizer=feature_extractor,
        optimizers=optimizers,
    )

    # 8. Finally, we can start training

    if training_args.do_train:
        # use the last checkpoint if one exists, otherwise start from the pretrained weights
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples
            if data_args.max_train_samples is not None
            else len(vectorized_datasets["train"])
        )
        metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = (
            data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
        )
        metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write a model card and (optionally) push everything to the Hub
    config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "speech-recognition",
        "tags": ["automatic-speech-recognition", data_args.dataset_name],
        "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
        "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
    }
    if "common_voice" in data_args.dataset_name:
        kwargs["language"] = config_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)

    return results


if __name__ == "__main__":
    main()