| """ |
| Fine-tuning the library models for sequence to sequence speech recognition. |
| """ |
| |
| |
|
|
| import logging |
| import os |
| import sys |
| from dataclasses import dataclass, field |
| from typing import Any, Dict, List, Optional, Union |

import datasets
import evaluate
import torch
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForSpeechSeq2Seq,
    AutoProcessor,
    AutoTokenizer,
    HfArgumentParser,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.50.0.dev0")

require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Feature extractor name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    token: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
            )
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to trust the execution of code from datasets/models defined on the Hub."
                " This option should only be set to `True` for repositories you trust and in which you have read the"
                " code, as it will execute code present on the Hub on your local machine."
            )
        },
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    freeze_encoder: bool = field(
        default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."}
    )
    forced_decoder_ids: Optional[List[List[int]]] = field(
        default=None,
        metadata={"help": "Deprecated. Please use the `language` and `task` arguments instead."},
    )
    suppress_tokens: Optional[List[int]] = field(
        default=None,
        metadata={
            "help": (
                "Deprecated. The use of `suppress_tokens` should not be required for the majority of fine-tuning"
                " examples. Should you need to use `suppress_tokens`, please manually update them in the fine-tuning"
                " script directly."
            )
        },
    )
    apply_spec_augment: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to apply *SpecAugment* data augmentation to the input features. This is currently only"
                " relevant for Wav2Vec2, HuBERT, WavLM and Whisper models."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    text_column_name: str = field(
        default="text",
        metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
    )
    max_duration_in_seconds: float = field(
        default=20.0,
        metadata={
            "help": (
                "Truncate audio files that are longer than `max_duration_in_seconds` seconds to"
                " `max_duration_in_seconds`"
            )
        },
    )
    min_duration_in_seconds: float = field(
        default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
    )
    preprocessing_only: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to only do data preprocessing and skip training. This is especially useful when data"
                " preprocessing errors out in distributed training due to timeout. In this case, one should run the"
                " preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets"
                " can consequently be loaded in distributed training"
            )
        },
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="test",
        metadata={
            "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
        },
    )
    do_lower_case: bool = field(
        default=True,
        metadata={"help": "Whether the target text should be lower cased."},
    )
    language: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
                "only. For English speech recognition, it should be set to `None`."
            )
        },
    )
    task: str = field(
        default="transcribe",
        metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
    )


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    """
    Data collator that will dynamically pad the inputs received.

    Args:
        processor ([`WhisperProcessor`])
            The processor used for processing the data.
        decoder_start_token_id (`int`)
            The begin-of-sentence token id of the decoder.
        forward_attention_mask (`bool`)
            Whether to return the attention mask.
    """

    processor: Any
    decoder_start_token_id: int
    forward_attention_mask: bool

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # Split inputs and labels since they have to be of different lengths and
        # need different padding methods.
        model_input_name = self.processor.model_input_names[0]
        input_features = [{model_input_name: feature[model_input_name]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")

        if self.forward_attention_mask:
            batch["attention_mask"] = torch.LongTensor([feature["attention_mask"] for feature in features])

        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")

        # Replace padding with -100 so that these tokens are ignored by the loss.
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        # If a bos token was appended in the tokenization step, cut it here, since
        # it gets prepended again when building the decoder inputs.
        if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
            labels = labels[:, 1:]

        batch["labels"] = labels

        return batch


def main():
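    # 1. Parse input arguments.
    # See all possible arguments in src/transformers/training_args.py or by passing
    # the --help flag to this script. We keep distinct sets of args, for a cleaner
    # separation of concerns.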
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps the maintainers better
    # allocate resources to the examples. The information sent is the one passed as
    # arguments to this script.
    send_example_telemetry("run_speech_recognition_seq2seq", model_args, data_args)
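
    # 2. Setup logging.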
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log a short summary on each process.
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )

    # Set the verbosity to info of the Transformers logger and log the full
    # training/evaluation parameters (on the main process only).
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        logger.info(f"Training/evaluation parameters {training_args}")
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
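
    # Set the seed before initializing the model.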
    set_seed(training_args.seed)
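
    # 4. Load the datasets.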
    raw_datasets = DatasetDict()

    if training_args.do_train:
        raw_datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=data_args.train_split_name,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )

    if training_args.do_eval:
        raw_datasets["eval"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=data_args.eval_split_name,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )
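
    # Sanity-check that the requested audio and text columns exist in the dataset.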
    if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names:
        raise ValueError(
            f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
        )

    if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names:
        raise ValueError(
            f"--text_column_name '{data_args.text_column_name}' not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--text_column_name` to the correct text column - one of "
            f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
        )
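
    # 5. Load the pretrained config, feature extractor, tokenizer and model.
    # Distributed training: the `.from_pretrained` methods guarantee that only one
    # local process can concurrently download the model and vocab.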
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    # SpecAugment for Whisper models.
    if getattr(config, "model_type", None) == "whisper":
        config.update({"apply_spec_augment": model_args.apply_spec_augment})

    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
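
    # The decoder_start_token_id is used to build the decoder inputs from the labels,
    # so it must be defined for seq2seq training.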
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if model_args.freeze_encoder:
        model.freeze_encoder()
        model.model.encoder.gradient_checkpointing = False

    # We only need to set the language and task ids in a multilingual setting.
    if hasattr(model.generation_config, "is_multilingual") and model.generation_config.is_multilingual:
        tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
        model.generation_config.language = data_args.language
        model.generation_config.task = data_args.task
    elif data_args.language is not None:
        raise ValueError(
            "Setting language token for an English-only checkpoint is not permitted. The language argument should "
            "only be set for multilingual checkpoints."
        )

    # Handle the deprecated `forced_decoder_ids` and `suppress_tokens` arguments.
    if model_args.forced_decoder_ids is not None:
        logger.warning(
            "The use of `forced_decoder_ids` is deprecated and will be removed in v4.41. "
            "Please use the `language` and `task` arguments instead."
        )
        model.generation_config.forced_decoder_ids = model_args.forced_decoder_ids
    else:
        model.generation_config.forced_decoder_ids = None
        model.config.forced_decoder_ids = None

    if model_args.suppress_tokens is not None:
        logger.warning(
            "The use of `suppress_tokens` is deprecated and will be removed in v4.41. "
            "Should you need `suppress_tokens`, please manually set them in the fine-tuning script."
        )
        model.generation_config.suppress_tokens = model_args.suppress_tokens
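
    # 6. Resample the speech dataset if its sampling rate differs from the model's.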
    dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
    if dataset_sampling_rate != feature_extractor.sampling_rate:
        raw_datasets = raw_datasets.cast_column(
            data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
        )
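
    # 7. Preprocess the datasets: read the audio files as arrays and tokenize the targets.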
    max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
    min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
    audio_column_name = data_args.audio_column_name
    num_workers = data_args.preprocessing_num_workers
    text_column_name = data_args.text_column_name
    model_input_name = feature_extractor.model_input_names[0]
    do_lower_case = data_args.do_lower_case
    # If SpecAugment is used for Whisper models, also return the attention mask to
    # guide the mask along the time axis.
    forward_attention_mask = (
        getattr(config, "model_type", None) == "whisper"
        and getattr(config, "apply_spec_augment", False)
        and getattr(config, "mask_time_prob", 0) > 0
    )

    if data_args.max_train_samples is not None:
        raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))

    if data_args.max_eval_samples is not None:
        raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))

    def prepare_dataset(batch):
        # process audio
        sample = batch[audio_column_name]
        inputs = feature_extractor(
            sample["array"], sampling_rate=sample["sampling_rate"], return_attention_mask=forward_attention_mask
        )
        # process audio length
        batch[model_input_name] = inputs.get(model_input_name)[0]
        batch["input_length"] = len(sample["array"])
        if forward_attention_mask:
            batch["attention_mask"] = inputs.get("attention_mask")[0]

        # process targets
        input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
        batch["labels"] = tokenizer(input_str).input_ids
        return batch

    with training_args.main_process_first(desc="dataset map pre-processing"):
        vectorized_datasets = raw_datasets.map(
            prepare_dataset,
            remove_columns=next(iter(raw_datasets.values())).column_names,
            num_proc=data_args.preprocessing_num_workers,
            desc="preprocess datasets",
        )
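
    # Filter out audio samples that are shorter than min_input_length or longer than
    # max_input_length.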
    def is_audio_in_length_range(length):
        return min_input_length < length < max_input_length

    vectorized_datasets = vectorized_datasets.filter(
        is_audio_in_length_range,
        num_proc=num_workers,
        input_columns=["input_length"],
    )

    # For large datasets it is advised to run the preprocessing on a single machine
    # first with `--preprocessing_only`, so that the cached datasets can then be
    # loaded directly in distributed training.
    if data_args.preprocessing_only:
        cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
        logger.info(f"Data preprocessing finished. Files cached at {cache}.")
        return
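
    # 8. Load the metric (word error rate).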
    metric = evaluate.load("wer", cache_dir=model_args.cache_dir)

    def compute_metrics(pred):
        pred_ids = pred.predictions

        # Replace -100 in the labels with the pad token id so they can be decoded.
        pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id

        pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
        # We do not want to group tokens when computing the metric.
        label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)

        wer = metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}
    # Make sure all processes wait until the files are saved.
    with training_args.main_process_first():
        # Only the main process saves the files.
        if is_main_process(training_args.local_rank):
            feature_extractor.save_pretrained(training_args.output_dir)
            tokenizer.save_pretrained(training_args.output_dir)
            config.save_pretrained(training_args.output_dir)

    processor = AutoProcessor.from_pretrained(training_args.output_dir)
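
    # 10. Define the data collator.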
    data_collator = DataCollatorSpeechSeq2SeqWithPadding(
        processor=processor,
        decoder_start_token_id=model.config.decoder_start_token_id,
        forward_attention_mask=forward_attention_mask,
    )
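
    # 11. Initialize the Trainer.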
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
        eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
        processing_class=feature_extractor,
        data_collator=data_collator,
        compute_metrics=compute_metrics if training_args.predict_with_generate else None,
    )
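
    # 12. Training.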
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # also saves the processing class for easy upload

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples
            if data_args.max_train_samples is not None
            else len(vectorized_datasets["train"])
        )
        metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
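
    # 13. Evaluation.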
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(
            metric_key_prefix="eval",
            max_length=training_args.generation_max_length,
            num_beams=training_args.generation_num_beams,
        )
        max_eval_samples = (
            data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
        )
        metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "automatic-speech-recognition"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)

    return results


if __name__ == "__main__":
    main()