import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import List, Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

from trplib import apply_trp

logger = logging.getLogger(__name__)

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample a chunk of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length)  # `randint` is inclusive on both ends
    return wav[random_offset : random_offset + sample_length]

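# For example, with sample_rate=16000 and max_length=20.0, a 30-second clip
# (480000 samples) yields one random contiguous window of 320000 samples,
# while any clip of 20 seconds or less is returned unchanged.

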
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments that can be
    specified on the command line.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    token: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
            )
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to trust the execution of code from datasets/models defined on the Hub."
                " This option should only be set to `True` for repositories you trust and in which you have read the"
                " code, as it will execute code present on the Hub on your local machine."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None,
        metadata={"help": "Deprecated: use `--freeze_feature_encoder` instead to freeze the feature encoder layers."},
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Whether to load a pretrained model whose head dimensions differ from the checkpoint."},
    )

    apply_trp: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to apply TRP to the model."},
    )
    trp_depths: Optional[int] = field(
        default=1,
        metadata={"help": "TRP depth value."},
    )
    trp_p: Optional[float] = field(
        default=0.1,
        metadata={"help": "TRP p value."},
    )
    trp_lambdas: Optional[List[float]] = field(
        default_factory=lambda: [0.4, 0.2, 0.1],
        metadata={"help": "TRP lambda values (list of floats)."},
    )

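    # Example invocation with the TRP flags defined above (HfArgumentParser parses
    # `List[float]` fields with nargs="+"):
    #   --apply_trp --trp_depths 1 --trp_p 0.1 --trp_lambdas 0.4 0.2 0.1
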
    def __post_init__(self):
        # Only warn when the deprecated flag was explicitly set; `None` means unset.
        if self.freeze_feature_extractor is not None and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct label column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy.
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

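    # For wav2vec2-style feature extractors, `model_input_names[0]` is "input_values",
    # so both transforms return batches of the form {"input_values": [...], "labels": [...]}.
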
    # Prepare label mappings.
    # We'll include these in the model's config to get human-readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

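    # e.g. for labels ["down", "up"] this gives label2id = {"down": "0", "up": "1"}
    # and id2label = {"0": "down", "1": "up"} (string ids, as used in the config).
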
    # Load the accuracy metric from the datasets package.
    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

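    # `eval_pred.predictions` holds logits of shape (num_examples, num_labels);
    # the argmax over axis 1 turns them into predicted class indices.
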
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

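    # `apply_trp` is provided by the local `trplib` module; it is assumed to return
    # the (possibly wrapped) model, since its return value replaces `model` below.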
    if model_args.apply_trp:
        model = apply_trp(model, depths=model_args.trp_depths, p=model_args.trp_p, lambdas=model_args.trp_lambdas)

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

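    # `set_transform` applies the transforms lazily, at example-access time, so each
    # training epoch draws a fresh random window per clip via `random_subsample`.
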
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        processing_class=feature_extractor,  # named `tokenizer=` in transformers < 4.46
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)

if __name__ == "__main__":
    main()