| | """ PyTorch Wav2Vec2-Conformer model.""" |
| |
|
| | import math |
| | from dataclasses import dataclass |
| | from typing import Optional, Tuple, Union |
| |
|
| | import numpy as np |
| | import torch |
| | import torch.utils.checkpoint |
| | from torch import nn |
| | from torch.nn import CrossEntropyLoss |
| | from torch.nn import functional as F |
| |
|
| | from transformers.activations import ACT2FN |
| | from transformers.deepspeed import is_deepspeed_zero3_enabled |
| | from transformers.modeling_outputs import ( |
| | BaseModelOutput, |
| | CausalLMOutput, |
| | SequenceClassifierOutput, |
| | TokenClassifierOutput, |
| | Wav2Vec2BaseModelOutput, |
| | XVectorOutput, |
| | ) |
| | from transformers.modeling_utils import PreTrainedModel |
| | from transformers.utils import ( |
| | ModelOutput, |
| | add_code_sample_docstrings, |
| | add_start_docstrings, |
| | add_start_docstrings_to_model_forward, |
| | logging, |
| | replace_return_docstrings, |
| | ) |
| | from transformers.models.wav2vec2_conformer.configuration_wav2vec2_conformer import Wav2Vec2ConformerConfig |
| |
|
| |
|
| | logger = logging.get_logger(__name__) |
| |
|
| |
|
| | _HIDDEN_STATES_START_POSITION = 2 |
| |
|
| | |
| | _CONFIG_FOR_DOC = "Wav2Vec2ConformerConfig" |
| |
|
| | |
| | _CHECKPOINT_FOR_DOC = "facebook/wav2vec2-conformer-rope-large-960h-ft" |
| | _EXPECTED_OUTPUT_SHAPE = [1, 292, 1024] |
| |
|
| | |
| | _CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'" |
| | _CTC_EXPECTED_LOSS = 64.21 |
| |
|
| |
|
| | WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ |
| | "facebook/wav2vec2-conformer-rel-pos-large", |
| | |
| | ] |


@dataclass
class Wav2Vec2ConformerForPreTrainingOutput(ModelOutput):
    """
    Output type of [`Wav2Vec2ConformerForPreTraining`], with potential hidden states and attentions.

    Args:
        loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the
            [official paper](https://arxiv.org/pdf/2006.11477.pdf).
        projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
            Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
            projected quantized states.
        projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
            Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
            target vectors for contrastive loss.
        codevector_perplexity (`torch.FloatTensor` of shape `(1,)`):
            The perplexity of the codevector distribution, used as a measure of codebook diversity.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
            The contrastive loss (L_m) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf).
        diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
            The diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf).
    """

    loss: Optional[torch.FloatTensor] = None
    projected_states: torch.FloatTensor = None
    projected_quantized_states: torch.FloatTensor = None
    codevector_perplexity: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    contrastive_loss: Optional[torch.FloatTensor] = None
    diversity_loss: Optional[torch.FloatTensor] = None


def _compute_mask_indices(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    attention_mask: Optional[torch.LongTensor] = None,
    min_masks: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
               the first element is the batch size and the second element is the length of the axis to span.
        mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                   independently generated mask spans of length `mask_length` is computed by
                   `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                   actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                        each batch dimension.
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}"
        )

    # epsilon is used for probabilistic rounding
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        """Given input length, compute how many spans should be masked"""
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)

        # make sure num masked span <= sequence_length
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length

        # make sure num_masked span is also <= input_length - (mask_length - 1)
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)

        return num_masked_span

    # compute number of masked spans in batch
    input_lengths = (
        attention_mask.sum(-1).detach().tolist()
        if attention_mask is not None
        else [sequence_length for _ in range(batch_size)]
    )

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []

    max_num_masked_span = compute_num_masked_span(sequence_length)

    if max_num_masked_span == 0:
        return spec_aug_mask

    for input_length in input_lengths:
        # compute num of masked spans for this input
        num_masked_span = compute_num_masked_span(input_length)

        # get random indices to mask
        spec_aug_mask_idx = np.random.choice(
            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
        )

        # pick the first sampled index to serve as a dummy index for padding the
        # vector, so that all batch rows have the same number of span starts
        # despite probabilistic rounding
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
            # `sequence_length`, in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask
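
# A minimal usage sketch of the masking utility above (illustrative only; the
# shape and probabilities are made-up values):
#
#     mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.5, mask_length=10)
#     # `mask` is a boolean (2, 100) array in which each row is covered by
#     # length-10 spans; because spans may overlap, the realized coverage is
#     # at most (and often below) mask_prob * 100 frames per row.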


def _sample_negative_indices(
    features_shape: Tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None
):
    """
    Sample `num_negatives` vectors from feature vectors.
    """
    batch_size, sequence_length = features_shape

    # generate indices of the positive vectors themselves
    sequence_length_range = np.arange(sequence_length)

    # get `num_negatives` random vector indices from the same utterance
    sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)

    mask_time_indices = (
        mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)
    )

    for batch_idx in range(batch_size):
        high = mask_time_indices[batch_idx].sum() - 1
        mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]

        feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))
        sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))
        # avoid sampling the same positive vector, but keep the distribution uniform
        sampled_indices[sampled_indices >= feature_indices] += 1

        # remap to actual indices
        sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]

        # correct for batch size
        sampled_negative_indices[batch_idx] += batch_idx * sequence_length

    return sampled_negative_indices
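
# Illustrative sketch of how the sampler is typically combined with the mask
# utility above (hypothetical shapes and hyperparameters):
#
#     mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.2, mask_length=2)
#     negatives = _sample_negative_indices((2, 100), num_negatives=100, mask_time_indices=mask)
#     # negatives[b, t] holds 100 indices into the flattened (2 * 100, hidden)
#     # feature matrix; for masked positions, none of them points at the
#     # positive vector (b, t) itself.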


class Wav2Vec2ConformerNoLayerNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states


class Wav2Vec2ConformerLayerNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)

        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)

        hidden_states = self.activation(hidden_states)
        return hidden_states


class Wav2Vec2ConformerGroupNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
class Wav2Vec2ConformerPositionalConvEmbedding(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
        )

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
        else:
            self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)

        self.padding = Wav2Vec2ConformerSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states


class Wav2Vec2ConformerRotaryPositionalEmbedding(nn.Module):
    """Rotary positional embedding
    Reference: https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf
    """

    def __init__(self, config):
        super().__init__()
        dim = config.hidden_size // config.num_attention_heads
        base = config.rotary_embedding_base

        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)
        self.cached_sequence_length = None
        self.cached_rotary_positional_embedding = None

    def forward(self, hidden_states):
        sequence_length = hidden_states.shape[1]

        if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
            return self.cached_rotary_positional_embedding

        self.cached_sequence_length = sequence_length
        time_stamps = torch.arange(sequence_length).type_as(self.inv_freq)
        freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
        embeddings = torch.cat((freqs, freqs), dim=-1)

        cos_embeddings = embeddings.cos()[:, None, None, :]
        sin_embeddings = embeddings.sin()[:, None, None, :]
        self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, sin_embeddings])
        return self.cached_rotary_positional_embedding
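
    # For reference, the rotation applied in `_apply_rotary_embedding` below
    # follows the standard RoPE formulation: for position t and frequency
    # theta_i,
    #
    #     x_rotated = x * cos(t * theta_i) + rotate_half(x) * sin(t * theta_i)
    #
    # where `rotate_half` swaps the two halves of the head dimension and negates
    # the second half. Only query/key states are rotated; values are untouched.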


class Wav2Vec2ConformerRelPositionalEmbedding(nn.Module):
    """Relative positional encoding module."""

    def __init__(self, config):
        super().__init__()
        self.max_len = config.max_source_positions
        self.d_model = config.hidden_size
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, self.max_len))

    def extend_pe(self, x):
        # reset the positional encodings
        if self.pe is not None:
            # self.pe contains both positive and negative parts;
            # the length of self.pe is 2 * input_len - 1
            if self.pe.size(1) >= x.size(1) * 2 - 1:
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        # suppose `i` is the position of the query vector and `j` is the
        # position of the key vector. We use positive relative positions when
        # keys are to the left (i>j) and negative relative positions otherwise (i<j).
        pe_positive = torch.zeros(x.size(1), self.d_model)
        pe_negative = torch.zeros(x.size(1), self.d_model)
        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32) * -(math.log(10000.0) / self.d_model)
        )
        pe_positive[:, 0::2] = torch.sin(position * div_term)
        pe_positive[:, 1::2] = torch.cos(position * div_term)
        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)

        # Reverse the order of positive indices and concat both positive and
        # negative indices. This supports the shifting trick
        # of https://arxiv.org/abs/1901.02860
        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
        pe_negative = pe_negative[1:].unsqueeze(0)
        pe = torch.cat([pe_positive, pe_negative], dim=1)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, hidden_states: torch.Tensor):
        self.extend_pe(hidden_states)
        start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1
        end_idx = self.pe.size(1) // 2 + hidden_states.size(1)
        relative_position_embeddings = self.pe[:, start_idx:end_idx]

        return relative_position_embeddings
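
    # Indexing sketch: for an input of length T, `extend_pe` builds a table of
    # 2*T - 1 encodings ordered from relative position T-1 down to -(T-1). With
    # center = pe.size(1) // 2, the slice [center - T + 1 : center + T] taken in
    # `forward` returns exactly the 2*T - 1 vectors needed to score every
    # query/key offset, also when a longer cached table is reused.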


class Wav2Vec2ConformerSamePadLayer(nn.Module):
    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0

    def forward(self, hidden_states):
        if self.num_pad_remove > 0:
            hidden_states = hidden_states[:, :, : -self.num_pad_remove]
        return hidden_states


class Wav2Vec2ConformerFeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    def __init__(self, config):
        super().__init__()

        if config.feat_extract_norm == "group":
            conv_layers = [Wav2Vec2ConformerGroupNormConvLayer(config, layer_id=0)] + [
                Wav2Vec2ConformerNoLayerNormConvLayer(config, layer_id=i + 1)
                for i in range(config.num_feat_extract_layers - 1)
            ]
        elif config.feat_extract_norm == "layer":
            conv_layers = [
                Wav2Vec2ConformerLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
            ]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(conv_layers)
        self.gradient_checkpointing = False
        self._requires_grad = True

    def _freeze_parameters(self):
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def forward(self, input_values):
        hidden_states = input_values[:, None]

        # make sure hidden_states require grad for gradient_checkpointing
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True

        for conv_layer in self.conv_layers:
            if self._requires_grad and self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(conv_layer),
                    hidden_states,
                )
            else:
                hidden_states = conv_layer(hidden_states)

        return hidden_states
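
    # Shape sketch, assuming the default feature-extractor config (kernels
    # 10,3,3,3,3,2,2 and strides 5,2,2,2,2,2,2): raw 16 kHz audio is downsampled
    # by a factor of 320, i.e. one frame per 20 ms, so a 1 s clip of 16000
    # samples yields 49 frames of dimension conv_dim[-1].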


class Wav2Vec2ConformerFeatureProjection(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # non-projected hidden states are needed for quantization
        norm_hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.projection(norm_hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states, norm_hidden_states


class Wav2Vec2ConformerFeedForward(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)

        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)

        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states


class Wav2Vec2ConformerConvolutionModule(nn.Module):
    """Convolution block used in the conformer block"""

    def __init__(self, config):
        super().__init__()
        if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
            raise ValueError("`config.conv_depthwise_kernel_size` should be an odd number for 'SAME' padding")
        self.layer_norm = nn.LayerNorm(config.hidden_size)
        self.pointwise_conv1 = torch.nn.Conv1d(
            config.hidden_size,
            2 * config.hidden_size,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False,
        )
        self.glu = torch.nn.GLU(dim=1)
        self.depthwise_conv = torch.nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            config.conv_depthwise_kernel_size,
            stride=1,
            padding=(config.conv_depthwise_kernel_size - 1) // 2,
            groups=config.hidden_size,
            bias=False,
        )
        self.batch_norm = torch.nn.BatchNorm1d(config.hidden_size)
        self.activation = ACT2FN[config.hidden_act]
        self.pointwise_conv2 = torch.nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False,
        )
        self.dropout = torch.nn.Dropout(config.conformer_conv_dropout)

    def forward(self, hidden_states):
        hidden_states = self.layer_norm(hidden_states)
        # exchange the temporal dimension and the feature dimension
        hidden_states = hidden_states.transpose(1, 2)

        # GLU mechanism
        # => (batch, 2*channel, time)
        hidden_states = self.pointwise_conv1(hidden_states)
        # => (batch, channel, time)
        hidden_states = self.glu(hidden_states)

        # 1D depthwise conv
        hidden_states = self.depthwise_conv(hidden_states)
        hidden_states = self.batch_norm(hidden_states)
        hidden_states = self.activation(hidden_states)

        hidden_states = self.pointwise_conv2(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
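
    # Shape walk-through of `forward` for an input (batch, time, hidden):
    # transpose -> (batch, hidden, time); pointwise_conv1 -> (batch, 2*hidden, time);
    # GLU halves the channels back to hidden; the depthwise conv preserves time
    # thanks to the symmetric padding; pointwise_conv2 plus the final transpose
    # restore (batch, time, hidden), keeping the block residual-compatible.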


class Wav2Vec2ConformerSelfAttention(nn.Module):
    """Construct a Wav2Vec2ConformerSelfAttention object.
    Can be enhanced with rotary or relative position embeddings.
    """

    def __init__(self, config):
        super().__init__()

        self.head_size = config.hidden_size // config.num_attention_heads
        self.num_heads = config.num_attention_heads
        self.position_embeddings_type = config.position_embeddings_type

        self.linear_q = nn.Linear(config.hidden_size, config.hidden_size)
        self.linear_k = nn.Linear(config.hidden_size, config.hidden_size)
        self.linear_v = nn.Linear(config.hidden_size, config.hidden_size)
        self.linear_out = nn.Linear(config.hidden_size, config.hidden_size)

        self.dropout = nn.Dropout(p=config.attention_dropout)
        self.dropout_p = config.attention_dropout

        self.is_causal = config.is_causal

        if self.position_embeddings_type == "relative":
            # linear transformation for positional encoding
            self.linear_pos = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
            # these two learnable biases are used in matrix c and matrix d
            # as described in https://arxiv.org/abs/1901.02860 Section 3.3
            self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
            self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size))

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        relative_position_embeddings: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        # self-attention mechanism
        batch_size, sequence_length, hidden_size = hidden_states.size()

        # make sure query/key states can be != value states
        query_key_states = hidden_states
        value_states = hidden_states

        if self.position_embeddings_type == "rotary":
            if relative_position_embeddings is None:
                raise ValueError(
                    "`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'`"
                )
            query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings)

        # project query_key_states and value_states
        query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
        key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
        value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)

        # => (batch, head, time1, d_k)
        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        # Fused scaled-dot-product attention. Note that the flash kernel does not
        # support a non-None `attn_mask`, so the math and memory-efficient kernels
        # are kept enabled as fallbacks; dropout is only applied during training.
        # Attention probabilities are not materialized in this path, so `probs`
        # is returned as None even when `output_attentions` is requested.
        with torch.backends.cuda.sdp_kernel(enable_math=True, enable_flash=True, enable_mem_efficient=True):
            hidden_states = F.scaled_dot_product_attention(
                query,
                key,
                value,
                attn_mask=attention_mask,
                dropout_p=self.dropout_p if self.training else 0.0,
                is_causal=self.is_causal,
            )
        probs = None

        # => (batch, time1, hidden_size)
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
        hidden_states = self.linear_out(hidden_states)

        return hidden_states, probs

    def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
        batch_size, sequence_length, hidden_size = hidden_states.size()
        hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size)

        cos = relative_position_embeddings[0, :sequence_length, ...]
        sin = relative_position_embeddings[1, :sequence_length, ...]

        # rotate hidden_states with rotary embeddings
        hidden_states = hidden_states.transpose(0, 1)
        rotated_states_begin = hidden_states[..., : self.head_size // 2]
        rotated_states_end = hidden_states[..., self.head_size // 2 :]
        rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1)
        hidden_states = (hidden_states * cos) + (rotated_states * sin)
        hidden_states = hidden_states.transpose(0, 1)

        hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size)

        return hidden_states

    def _apply_relative_embeddings(self, query, key, relative_position_embeddings):
        # 1. project positional embeddings
        # => (batch, head, d_k, 2*time1-1)
        proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings)
        proj_relative_position_embeddings = proj_relative_position_embeddings.view(
            relative_position_embeddings.size(0), -1, self.num_heads, self.head_size
        )
        proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2)
        proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3)

        # 2. Add bias to query
        # => (batch, head, time1, d_k)
        query = query.transpose(1, 2)
        q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
        q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)

        # 3. attention score: first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # => (batch, head, time1, time2)
        scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))

        # 4. then compute matrix b and matrix d
        # => (batch, head, time1, 2*time1-1)
        scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings)

        # 5. shift matrix b and matrix d
        zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype)
        scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1)
        scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2])
        scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape)
        scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd)
        scores_bd = scores_bd[:, :, :, : scores_bd.size(-1) // 2 + 1]

        # 6. sum matrices
        # => (batch, head, time1, time2)
        scores = (scores_ac + scores_bd) / math.sqrt(self.head_size)

        return scores
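
    # The zero-pad / reshape / slice sequence in step 5 is the Transformer-XL
    # style "relative shift": `scores_bd` starts out indexed by (query position,
    # relative distance) over 2*time1 - 1 distances, and the shift realigns each
    # row so the result is indexed by (query position, key position), of which
    # only the first time1 columns are valid and kept. This avoids materializing
    # an explicit (time1, time2) gather over the relative-position table.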


class Wav2Vec2ConformerEncoderLayer(nn.Module):
    """Conformer block based on https://arxiv.org/abs/2005.08100."""

    def __init__(self, config):
        super().__init__()
        embed_dim = config.hidden_size
        dropout = config.attention_dropout

        # Feed-forward 1
        self.ffn1_layer_norm = nn.LayerNorm(embed_dim)
        self.ffn1 = Wav2Vec2ConformerFeedForward(config)

        # Self-Attention
        self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
        self.self_attn_dropout = torch.nn.Dropout(dropout)
        self.self_attn = Wav2Vec2ConformerSelfAttention(config)

        # Conformer Convolution
        self.conv_module = Wav2Vec2ConformerConvolutionModule(config)

        # Feed-forward 2
        self.ffn2_layer_norm = nn.LayerNorm(embed_dim)
        self.ffn2 = Wav2Vec2ConformerFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim)

    def forward(
        self,
        hidden_states,
        attention_mask: Optional[torch.Tensor] = None,
        relative_position_embeddings: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ):
        # 1. Feed-Forward 1 layer
        residual = hidden_states
        hidden_states = self.ffn1_layer_norm(hidden_states)
        hidden_states = self.ffn1(hidden_states)
        hidden_states = hidden_states * 0.5 + residual
        residual = hidden_states

        # 2. Self-Attention layer
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            relative_position_embeddings=relative_position_embeddings,
            output_attentions=output_attentions,
        )
        hidden_states = self.self_attn_dropout(hidden_states)
        hidden_states = hidden_states + residual

        # 3. Convolutional Layer
        residual = hidden_states
        hidden_states = self.conv_module(hidden_states)
        hidden_states = residual + hidden_states

        # 4. Feed-Forward 2 layer
        residual = hidden_states
        hidden_states = self.ffn2_layer_norm(hidden_states)
        hidden_states = self.ffn2(hidden_states)
        hidden_states = hidden_states * 0.5 + residual
        hidden_states = self.final_layer_norm(hidden_states)

        return hidden_states, attn_weights
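
    # Summary of the block above, following the Conformer paper
    # (https://arxiv.org/abs/2005.08100) with pre-LayerNorm around each module:
    #
    #     x = x + 1/2 * FFN(x)            # macaron-style half-step feed-forward
    #     x = x + MHSA(x)                 # self-attention with positional info
    #     x = x + Conv(x)                 # convolution module
    #     x = LayerNorm(x + 1/2 * FFN(x))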


class Wav2Vec2ConformerEncoder(nn.Module):
    def __init__(self, config, is_causal=False):
        super().__init__()
        config.is_causal = is_causal
        self.config = config

        if config.position_embeddings_type == "relative":
            self.embed_positions = Wav2Vec2ConformerRelPositionalEmbedding(config)
        elif config.position_embeddings_type == "rotary":
            self.embed_positions = Wav2Vec2ConformerRotaryPositionalEmbedding(config)
        else:
            self.embed_positions = None

        self.pos_conv_embed = Wav2Vec2ConformerPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([Wav2Vec2ConformerEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            hidden_states[~attention_mask] = 0.0

            # extend attention_mask to an additive float mask of shape
            # (batch, 1, seq_len, seq_len) with large negative values at padding
            attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
            attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )

        hidden_states = self.dropout(hidden_states)

        if self.embed_positions is not None:
            relative_position_embeddings = self.embed_positions(hidden_states)
        else:
            relative_position_embeddings = None

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                        relative_position_embeddings,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states,
                        attention_mask=attention_mask,
                        relative_position_embeddings=relative_position_embeddings,
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class Wav2Vec2ConformerGumbelVectorQuantizer(nn.Module):
    """
    Vector quantization using gumbel softmax. See [Categorical Reparameterization with
    Gumbel-Softmax](https://arxiv.org/pdf/1611.01144.pdf) for more information.
    """

    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group

        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(
                f"`config.codevector_dim` {config.codevector_dim} must be divisible "
                f"by `config.num_codevector_groups` {self.num_groups} for concatenation"
            )

        # storage for codebook variables (codewords)
        self.codevectors = nn.Parameter(
            torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
        )
        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)

        # can be decayed during training
        self.temperature = 2

    @staticmethod
    def _compute_perplexity(probs, mask=None):
        if mask is not None:
            mask_extended = mask.flatten()[:, None, None].expand(probs.shape)
            probs = torch.where(mask_extended, probs, torch.zeros_like(probs))
            marginal_probs = probs.sum(dim=0) / mask.sum()
        else:
            marginal_probs = probs.mean(dim=0)

        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
        return perplexity

    def forward(self, hidden_states, mask_time_indices=None):
        batch_size, sequence_length, hidden_size = hidden_states.shape

        # project to codevector dim
        hidden_states = self.weight_proj(hidden_states)
        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)

        if self.training:
            # sample code vector probs via gumbel softmax in a differentiable way
            codevector_probs = nn.functional.gumbel_softmax(
                hidden_states.float(), tau=self.temperature, hard=True
            ).type_as(hidden_states)

            # compute perplexity
            codevector_soft_dist = torch.softmax(
                hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
            )
            perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices)
        else:
            # take argmax in a non-differentiable way and
            # compute a hard codevector distribution (one hot)
            codevector_idx = hidden_states.argmax(dim=-1)
            codevector_probs = hidden_states.new_zeros(hidden_states.shape).scatter_(
                -1, codevector_idx.view(-1, 1), 1.0
            )
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)

            perplexity = self._compute_perplexity(codevector_probs, mask_time_indices)

        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        # use probs to retrieve codevectors
        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)

        return codevectors, perplexity
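
    # Codebook bookkeeping sketch, assuming the default config values
    # (num_codevector_groups=2, num_codevectors_per_group=320): each frame picks
    # one entry per group, giving 320**2 = 102400 possible quantized vectors;
    # the two chosen halves of size codevector_dim // 2 are concatenated to form
    # the target vector returned above.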


class Wav2Vec2ConformerAdapter(nn.Module):
    def __init__(self, config):
        super().__init__()

        # feature dim might need to be down-projected
        if config.output_hidden_size != config.hidden_size:
            self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
            self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
        else:
            self.proj = self.proj_layer_norm = None

        self.layers = nn.ModuleList(Wav2Vec2ConformerAdapterLayer(config) for _ in range(config.num_adapter_layers))
        self.layerdrop = config.layerdrop

    def forward(self, hidden_states):
        # down project hidden_states if necessary
        if self.proj is not None and self.proj_layer_norm is not None:
            hidden_states = self.proj(hidden_states)
            hidden_states = self.proj_layer_norm(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)

        for layer in self.layers:
            layerdrop_prob = np.random.random()
            if not self.training or (layerdrop_prob > self.layerdrop):
                hidden_states = layer(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states


class Wav2Vec2ConformerAdapterLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.output_hidden_size,
            2 * config.output_hidden_size,
            config.adapter_kernel_size,
            stride=config.adapter_stride,
            padding=1,
        )

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = nn.functional.glu(hidden_states, dim=1)

        return hidden_states


class Wav2Vec2ConformerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Wav2Vec2ConformerConfig
    base_model_prefix = "wav2vec2_conformer"
    main_input_name = "input_values"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # the pre-training projection layers use standard Linear init
        if isinstance(module, Wav2Vec2ConformerForPreTraining):
            module.project_hid.reset_parameters()
            module.project_q.reset_parameters()
            module.project_hid._is_hf_initialized = True
            module.project_q._is_hf_initialized = True
        # gumbel softmax requires special init
        elif isinstance(module, Wav2Vec2ConformerGumbelVectorQuantizer):
            module.weight_proj.weight.data.normal_(mean=0.0, std=1)
            module.weight_proj.bias.data.zero_()
            nn.init.uniform_(module.codevectors)
        elif isinstance(module, Wav2Vec2ConformerSelfAttention):
            if hasattr(module, "pos_bias_u"):
                nn.init.xavier_uniform_(module.pos_bias_u)
            if hasattr(module, "pos_bias_v"):
                nn.init.xavier_uniform_(module.pos_bias_v)
        elif isinstance(module, Wav2Vec2ConformerPositionalConvEmbedding):
            nn.init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, Wav2Vec2ConformerFeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)

            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)

            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)

    def _get_feat_extract_output_lengths(
        self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None
    ):
        """
        Computes the output length of the convolutional layers
        """

        add_adapter = self.config.add_adapter if add_adapter is None else add_adapter

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1

        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        if add_adapter:
            for _ in range(self.config.num_adapter_layers):
                input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)

        return input_lengths
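
    # Worked example, assuming the default feature encoder (kernels 10,3,3,3,3,2,2
    # and strides 5,2,2,2,2,2,2): an input of 16000 samples shrinks as
    # 16000 -> 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49,
    # each step applying floor((length - kernel) / stride) + 1.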

    def _get_feature_vector_attention_mask(
        self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None
    ):
        # Effectively attention_mask.sum(-1), but not in-place so that it can
        # run in inference mode.
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]

        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
        output_lengths = output_lengths.to(torch.long)

        batch_size = attention_mask.shape[0]

        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations make sure that all positions before the output length indices are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Wav2Vec2ConformerEncoder, Wav2Vec2ConformerFeatureEncoder)):
            module.gradient_checkpointing = value


WAV2VEC2_CONFORMER_START_DOCSTRING = r"""
    Wav2Vec2Conformer was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech
    Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael
    Auli.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, etc.).

    This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#nn.Module) sub-class. Use it as a
    regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`Wav2Vec2ConformerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


WAV2VEC2_CONFORMER_INPUTS_DOCSTRING = r"""
    Args:
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
            soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
            conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
            1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            <Tip warning={true}>

            `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
            True`. For all models whose processor has `config.return_attention_mask == False`, such as
            [wav2vec2-conformer-rel-pos-large](https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large),
            `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For
            such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware
            that these models also yield slightly different results depending on whether `input_values` is padded or
            not.

            </Tip>

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Wav2Vec2Conformer Model transformer outputting raw hidden-states without any specific head on top.",
    WAV2VEC2_CONFORMER_START_DOCSTRING,
)
class Wav2Vec2ConformerModel(Wav2Vec2ConformerPreTrainedModel):
    def __init__(self, config: Wav2Vec2ConformerConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = Wav2Vec2ConformerFeatureEncoder(config)
        self.feature_projection = Wav2Vec2ConformerFeatureProjection(config)

        # model only needs the masking vector if mask prob is > 0.0
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())

        self.encoder = Wav2Vec2ConformerEncoder(config)

        self.adapter = Wav2Vec2ConformerAdapter(config) if config.add_adapter else None

        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.feature_extractor._freeze_parameters()

    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).
        """

        # `config.apply_spec_augment` can set masking to False
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states

        # generate indices & apply SpecAugment along time axis
        batch_size, sequence_length, hidden_size = hidden_states.size()

        if mask_time_indices is not None:
            # apply SpecAugment along time axis with given mask_time_indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)

        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0

        return hidden_states

    @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Wav2Vec2BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)

        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(
                extract_features.shape[1], attention_mask, add_adapter=False
            )

        hidden_states, extract_features = self.feature_projection(extract_features)
        hidden_states = self._mask_hidden_states(
            hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
        )

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = encoder_outputs[0]

        if self.adapter is not None:
            hidden_states = self.adapter(hidden_states)

        if not return_dict:
            return (hidden_states, extract_features) + encoder_outputs[1:]

        return Wav2Vec2BaseModelOutput(
            last_hidden_state=hidden_states,
            extract_features=extract_features,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
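
# Minimal usage sketch for the bare model (illustrative; `audio_array` is a
# placeholder 1-D numpy array of 16 kHz samples):
#
#     from transformers import AutoFeatureExtractor
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained(_CHECKPOINT_FOR_DOC)
#     model = Wav2Vec2ConformerModel.from_pretrained(_CHECKPOINT_FOR_DOC)
#     inputs = feature_extractor(audio_array, sampling_rate=16000, return_tensors="pt")
#     outputs = model(**inputs)
#     # outputs.last_hidden_state has shape (batch, frames, config.hidden_size)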


@add_start_docstrings(
    """Wav2Vec2Conformer Model with a quantizer and `VQ` head on top.""", WAV2VEC2_CONFORMER_START_DOCSTRING
)
class Wav2Vec2ConformerForPreTraining(Wav2Vec2ConformerPreTrainedModel):
    def __init__(self, config: Wav2Vec2ConformerConfig):
        super().__init__(config)
        self.wav2vec2_conformer = Wav2Vec2ConformerModel(config)
        self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)

        self.quantizer = Wav2Vec2ConformerGumbelVectorQuantizer(config)

        self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim)
        self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)

        # Initialize weights and apply final processing
        self.post_init()

    def set_gumbel_temperature(self, temperature: int):
        """
        Set the Gumbel softmax temperature to a given value. Only necessary for training
        """
        self.quantizer.temperature = temperature

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.wav2vec2_conformer.feature_extractor._freeze_parameters()

    @staticmethod
    def compute_contrastive_logits(
        target_features: torch.FloatTensor,
        negative_features: torch.FloatTensor,
        predicted_features: torch.FloatTensor,
        temperature: int = 0.1,
    ):
        """
        Compute logits for contrastive loss, using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
        """
        target_features = torch.cat([target_features, negative_features], dim=0)

        logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1).type_as(
            target_features
        )

        # apply temperature
        logits = logits / temperature
        return logits
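
    # Shape sketch for the helper above: `target_features` (1, B, T, H) is
    # concatenated with `negative_features` (K, B, T, H) along dim 0, and cosine
    # similarity against `predicted_features` (broadcast over dim 0) yields
    # logits of shape (K + 1, B, T), where row 0 corresponds to the positive
    # target.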
| |
|
| | @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING) |
| | @replace_return_docstrings(output_type=Wav2Vec2ConformerForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) |
| | |
| | def forward( |
| | self, |
| | input_values: Optional[torch.Tensor], |
| | attention_mask: Optional[torch.Tensor] = None, |
| | mask_time_indices: Optional[torch.BoolTensor] = None, |
| | sampled_negative_indices: Optional[torch.BoolTensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, Wav2Vec2ConformerForPreTrainingOutput]: |
| | r""" |
| | mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict |
| | masked extracted features in *config.proj_codevector_dim* space. |
| | sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*): |
| | Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss. |
| | Required input for pre-training. |
| | |
| | Returns: |
| | |
| | Example: |
| | |
| | ```python |
| | >>> import torch |
| | >>> from transformers import AutoFeatureExtractor, Wav2Vec2ConformerForPreTraining |
| | >>> from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer import ( |
| | ... _compute_mask_indices, |
| | ... _sample_negative_indices, |
| | ... ) |
| | >>> from datasets import load_dataset |
| | |
| | >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") |
| | >>> model = Wav2Vec2ConformerForPreTraining.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") |
| | |
| | >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") |
| | >>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values # Batch size 1 |
| | |
| | >>> # compute masked indices |
| | >>> batch_size, raw_sequence_length = input_values.shape |
| | >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length).item() |
| | >>> mask_time_indices = _compute_mask_indices( |
| | ... shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=2 |
| | ... ) |
| | >>> sampled_negative_indices = _sample_negative_indices( |
| | ... features_shape=(batch_size, sequence_length), |
| | ... num_negatives=model.config.num_negatives, |
| | ... mask_time_indices=mask_time_indices, |
| | ... ) |
| | >>> mask_time_indices = torch.tensor(data=mask_time_indices, device=input_values.device, dtype=torch.long) |
| | >>> sampled_negative_indices = torch.tensor( |
| | ... data=sampled_negative_indices, device=input_values.device, dtype=torch.long |
| | ... ) |
| | |
| | >>> with torch.no_grad(): |
| | ... outputs = model(input_values, mask_time_indices=mask_time_indices) |
| | |
| | >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) |
| | >>> cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) |
| | |
| | >>> # show that cosine similarity is much higher than random |
| | >>> cosine_sim[mask_time_indices.to(torch.bool)].mean() > 0.5 |
| | tensor(True) |
| | |
| | >>> # for contrastive loss training model should be put into train mode |
| | >>> model = model.train() |
| | >>> loss = model( |
| | ... input_values, mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices |
| | ... ).loss |
| | ```""" |
| |
|
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | if mask_time_indices is not None: |
| | mask_time_indices = mask_time_indices.to(torch.bool) |
| |
|
| | outputs = self.wav2vec2_conformer( |
| | input_values, |
| | attention_mask=attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | mask_time_indices=mask_time_indices, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | # 1. project all transformed features (including masked) to final vq dim
| | transformer_features = self.project_hid(outputs[0]) |
| |
|
| | # 2. quantize all (unmasked) extracted features and project to final vq dim
| | extract_features = self.dropout_features(outputs[1]) |
| |
|
| | if attention_mask is not None: |
| | # compute reduced attention_mask corresponding to feature vectors
| | attention_mask = self._get_feature_vector_attention_mask( |
| | extract_features.shape[1], attention_mask, add_adapter=False |
| | ) |
| |
|
| | quantized_features, codevector_perplexity = self.quantizer( |
| | extract_features, mask_time_indices=mask_time_indices |
| | ) |
| | quantized_features = self.project_q(quantized_features) |
| |
|
| | loss = contrastive_loss = diversity_loss = None |
| | if sampled_negative_indices is not None: |
| | batch_size, sequence_length, hidden_size = quantized_features.shape |
| |
|
| | # for training, we sample negatives
| | # 3. sample K negatives (distractors) quantized states for contrastive loss
| | # if attention_mask is passed, make sure that padded feature vectors cannot be sampled
| | # sample negative quantized vectors BTC => (BxT)C
| | negative_quantized_features = quantized_features.view(-1, hidden_size)[ |
| | sampled_negative_indices.long().view(-1) |
| | ] |
| | negative_quantized_features = negative_quantized_features.view( |
| | batch_size, sequence_length, -1, hidden_size |
| | ).permute(2, 0, 1, 3) |
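| | # Shape walk-through of the gather above (B = batch_size, T = sequence_length,
| | # K = num_negatives, H = hidden_size):
| | #   quantized_features.view(-1, H)          -> (B*T, H)
| | #   indexed by sampled indices, flattened   -> (B*T*K, H) gathered rows
| | #   .view(B, T, K, H).permute(2, 0, 1, 3)   -> (K, B, T, H)
| | # which lines the negatives up with `quantized_features[None, :]` of shape (1, B, T, H).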
| |
|
| | # 4. compute logits, corresponding to `logs = sim(c_t, [q_t, \sim{q}_t]) / \kappa`
| | # of equation (3) in https://arxiv.org/pdf/2006.11477.pdf
| | logits = self.compute_contrastive_logits( |
| | quantized_features[None, :], |
| | negative_quantized_features, |
| | transformer_features, |
| | self.config.contrastive_logits_temperature, |
| | ) |
| |
|
| | # 5. if a negative vector is identical to the positive (i.e. when codebook utilization is low),
| | # its cosine similarity will be masked
| | neg_is_pos = (quantized_features == negative_quantized_features).all(-1) |
| |
|
| | if neg_is_pos.any(): |
| | logits[1:][neg_is_pos] = float("-inf") |
| |
|
| | # 6. compute contrastive loss \mathbf{L}_m = cross_entropy(logs) =
| | # -log(exp(sim(c_t, q_t)/\kappa) / \sum_{\sim{q}} exp(sim(c_t, \sim{q})/\kappa))
| | logits = logits.transpose(0, 2).reshape(-1, logits.size(0)) |
| | target = ((1 - mask_time_indices.long()) * -100).transpose(0, 1).flatten() |
| |
|
| | contrastive_loss = nn.functional.cross_entropy(logits.float(), target, reduction="sum") |
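| | # Hedged note on the target construction: logits are reshaped to (T*B, 1 + K)
| | # rows whose correct class is always index 0 (the positive target), while
| | # unmasked time steps receive label -100 and are ignored by `cross_entropy`:
| | #
| | #     m = torch.tensor([[1, 0]])  # one masked, one unmasked time step
| | #     t = ((1 - m.long()) * -100).transpose(0, 1).flatten()
| | #     assert t.tolist() == [0, -100]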
| | # 7. compute diversity loss: \mathbf{L}_d
| | num_codevectors = self.config.num_codevectors_per_group * self.config.num_codevector_groups |
| | diversity_loss = ((num_codevectors - codevector_perplexity) / num_codevectors) * mask_time_indices.sum() |
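| | # Worked example (hedged): with G=2 groups of V=320 codevectors (num_codevectors
| | # = 640), a codevector perplexity of 400 and 100 masked steps, the unweighted
| | # diversity loss is (640 - 400) / 640 * 100 = 37.5.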
| |
|
| | # 8. \mathbf{L} = \mathbf{L}_m + \alpha * \mathbf{L}_d
| | loss = contrastive_loss + self.config.diversity_loss_weight * diversity_loss |
| |
|
| | if not return_dict: |
| | if loss is not None: |
| | return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] |
| | return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:] |
| |
|
| | return Wav2Vec2ConformerForPreTrainingOutput( |
| | loss=loss, |
| | projected_states=transformer_features, |
| | projected_quantized_states=quantized_features, |
| | codevector_perplexity=codevector_perplexity, |
| | hidden_states=outputs.hidden_states, |
| | attentions=outputs.attentions, |
| | contrastive_loss=contrastive_loss, |
| | diversity_loss=diversity_loss, |
| | ) |
| |
|
| |
|
| | @add_start_docstrings( |
| | """Wav2Vec2Conformer Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", |
| | WAV2VEC2_CONFORMER_START_DOCSTRING, |
| | ) |
| | class Wav2Vec2ConformerForCTC(Wav2Vec2ConformerPreTrainedModel): |
| | |
| | def __init__(self, config): |
| | super().__init__(config) |
| |
|
| | self.wav2vec2_conformer = Wav2Vec2ConformerModel(config) |
| | self.dropout = nn.Dropout(config.final_dropout) |
| |
|
| | if config.vocab_size is None: |
| | raise ValueError( |
| | f"You are trying to instantiate {self.__class__} with a configuration that " |
| | "does not define the vocabulary size of the language model head. Please " |
| | "instantiate the model as follows: `Wav2Vec2ConformerForCTC.from_pretrained(..., vocab_size=vocab_size)`. " |
| | "or define `vocab_size` of your model's configuration." |
| | ) |
| | output_hidden_size = ( |
| | config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size |
| | ) |
| | self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) |
| |
|
| | # Initialize weights and apply final processing
| | self.post_init() |
| |
|
| | |
| | def freeze_feature_encoder(self): |
| | """ |
| | Calling this function will disable the gradient computation for the feature encoder so that its parameters
| | will not be updated during training.
| | """ |
| | self.wav2vec2_conformer.feature_extractor._freeze_parameters() |
| |
|
| | @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING) |
| | @add_code_sample_docstrings( |
| | checkpoint=_CHECKPOINT_FOR_DOC, |
| | output_type=CausalLMOutput, |
| | config_class=_CONFIG_FOR_DOC, |
| | expected_output=_CTC_EXPECTED_OUTPUT, |
| | expected_loss=_CTC_EXPECTED_LOSS, |
| | ) |
| | |
| | def forward( |
| | self, |
| | input_values: Optional[torch.Tensor], |
| | attention_mask: Optional[torch.Tensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | labels: Optional[torch.Tensor] = None, |
| | ) -> Union[Tuple, CausalLMOutput]: |
| | r""" |
| | labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): |
| | Labels for connectionist temporal classification. Note that `target_length` has to be smaller than or equal to
| | the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. |
| | All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., |
| | config.vocab_size - 1]`. |
| | """ |
| |
|
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | outputs = self.wav2vec2_conformer( |
| | input_values, |
| | attention_mask=attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | hidden_states = outputs[0] |
| | hidden_states = self.dropout(hidden_states) |
| |
|
| | logits = self.lm_head(hidden_states) |
| |
|
| | loss = None |
| | if labels is not None: |
| | if labels.max() >= self.config.vocab_size: |
| | raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") |
| |
|
| | # retrieve loss input_lengths from attention_mask
| | attention_mask = ( |
| | attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) |
| | ) |
| | input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) |
| |
|
| | # assuming that padded tokens are filled with -100
| | # when not being attended to
| | labels_mask = labels >= 0 |
| | target_lengths = labels_mask.sum(-1) |
| | flattened_targets = labels.masked_select(labels_mask) |
| |
|
| | # ctc_loss doesn't support fp16
| | log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) |
| |
|
| | with torch.backends.cudnn.flags(enabled=False): |
| | loss = nn.functional.ctc_loss( |
| | log_probs, |
| | flattened_targets, |
| | input_lengths, |
| | target_lengths, |
| | blank=self.config.pad_token_id, |
| | reduction=self.config.ctc_loss_reduction, |
| | zero_infinity=self.config.ctc_zero_infinity, |
| | ) |
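| | # Note (hedged): `nn.functional.ctc_loss` expects log-probabilities of shape
| | # (time, batch, vocab), hence the transpose above; `input_lengths` and
| | # `target_lengths` are per-example frame and label counts, and the blank
| | # symbol reuses the tokenizer's pad token id.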
| |
|
| | if not return_dict: |
| | output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] |
| | return ((loss,) + output) if loss is not None else output |
| |
|
| | return CausalLMOutput( |
| | loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions |
| | ) |
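| | # Hedged usage sketch (the checkpoint matches _CHECKPOINT_FOR_DOC; `raw_audio`
| | # is assumed to be a 1-D float array sampled at 16 kHz):
| | #
| | #     from transformers import AutoProcessor, Wav2Vec2ConformerForCTC
| | #     processor = AutoProcessor.from_pretrained("facebook/wav2vec2-conformer-rope-large-960h-ft")
| | #     model = Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rope-large-960h-ft")
| | #     inputs = processor(raw_audio, sampling_rate=16_000, return_tensors="pt")
| | #     with torch.no_grad():
| | #         logits = model(**inputs).logits
| | #     predicted_ids = torch.argmax(logits, dim=-1)
| | #     transcription = processor.batch_decode(predicted_ids)[0]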
| |
|
| |
|
| | @add_start_docstrings( |
| | """ |
| | Wav2Vec2Conformer Model with a sequence classification head on top (a linear layer over the pooled output) for |
| | tasks like SUPERB Keyword Spotting. |
| | """, |
| | WAV2VEC2_CONFORMER_START_DOCSTRING, |
| | ) |
| | class Wav2Vec2ConformerForSequenceClassification(Wav2Vec2ConformerPreTrainedModel): |
| | |
| | def __init__(self, config): |
| | super().__init__(config) |
| |
|
| | if hasattr(config, "add_adapter") and config.add_adapter: |
| | raise ValueError( |
| | "Sequence classification does not support the use of Wav2Vec2Conformer adapters (config.add_adapter=True)" |
| | ) |
| | self.wav2vec2_conformer = Wav2Vec2ConformerModel(config) |
| | num_layers = config.num_hidden_layers + 1 |
| | if config.use_weighted_layer_sum: |
| | self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) |
| | self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) |
| | self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) |
| |
|
| | # Initialize weights and apply final processing
| | self.post_init() |
| |
|
| | |
| | def freeze_feature_encoder(self): |
| | """ |
| | Calling this function will disable the gradient computation for the feature encoder so that its parameters
| | will not be updated during training.
| | """ |
| | self.wav2vec2_conformer.feature_extractor._freeze_parameters() |
| |
|
| | def freeze_base_model(self): |
| | """ |
| | Calling this function will disable the gradient computation for the base model so that its parameters will not |
| | be updated during training. Only the classification head will be updated. |
| | """ |
| | for param in self.wav2vec2_conformer.parameters(): |
| | param.requires_grad = False |
| |
|
| | @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING) |
| | @add_code_sample_docstrings( |
| | checkpoint=_CHECKPOINT_FOR_DOC, |
| | output_type=SequenceClassifierOutput, |
| | config_class=_CONFIG_FOR_DOC, |
| | modality="audio", |
| | ) |
| | |
| | def forward( |
| | self, |
| | input_values: Optional[torch.Tensor], |
| | attention_mask: Optional[torch.Tensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | labels: Optional[torch.Tensor] = None, |
| | ) -> Union[Tuple, SequenceClassifierOutput]: |
| | r""" |
| | labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| | Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
| | config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Square loss); if
| | `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
| | """ |
| |
|
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| | output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states |
| |
|
| | outputs = self.wav2vec2_conformer( |
| | input_values, |
| | attention_mask=attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | if self.config.use_weighted_layer_sum: |
| | hidden_states = outputs[_HIDDEN_STATES_START_POSITION] |
| | hidden_states = torch.stack(hidden_states, dim=1) |
| | norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) |
| | hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) |
| | else: |
| | hidden_states = outputs[0] |
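| | # Numeric sketch of the weighted layer sum above (hedged): L+1 hidden-state
| | # tensors are stacked on dim=1 and mixed with softmaxed weights, e.g.
| | #
| | #     hs = torch.stack([torch.full((2, 4, 8), float(l)) for l in range(3)], dim=1)  # (2, 3, 4, 8)
| | #     w = nn.functional.softmax(torch.ones(3), dim=-1)   # uniform -> 1/3 each
| | #     out = (hs * w.view(-1, 1, 1)).sum(dim=1)           # (2, 4, 8), all values 1.0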
| |
|
| | hidden_states = self.projector(hidden_states) |
| | if attention_mask is None: |
| | pooled_output = hidden_states.mean(dim=1) |
| | else: |
| | padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) |
| | hidden_states[~padding_mask] = 0.0 |
| | pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) |
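| | # e.g. a sequence padded from 3 to 5 frames contributes only its 3 real frames:
| | # padded positions are zeroed, then the sum is divided by padding_mask.sum(dim=1) == 3.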
| |
|
| | logits = self.classifier(pooled_output) |
| |
|
| | loss = None |
| | if labels is not None: |
| | loss_fct = CrossEntropyLoss() |
| | loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) |
| |
|
| | if not return_dict: |
| | output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] |
| | return ((loss,) + output) if loss is not None else output |
| |
|
| | return SequenceClassifierOutput( |
| | loss=loss, |
| | logits=logits, |
| | hidden_states=outputs.hidden_states, |
| | attentions=outputs.attentions, |
| | ) |
| |
|
| |
|
| | @add_start_docstrings( |
| | """ |
| | Wav2Vec2Conformer Model with a frame classification head on top for tasks like Speaker Diarization. |
| | """, |
| | WAV2VEC2_CONFORMER_START_DOCSTRING, |
| | ) |
| | class Wav2Vec2ConformerForAudioFrameClassification(Wav2Vec2ConformerPreTrainedModel): |
| | |
| | def __init__(self, config): |
| | super().__init__(config) |
| |
|
| | if hasattr(config, "add_adapter") and config.add_adapter: |
| | raise ValueError( |
| | "Audio frame classification does not support the use of Wav2Vec2Conformer adapters (config.add_adapter=True)" |
| | ) |
| | self.wav2vec2_conformer = Wav2Vec2ConformerModel(config) |
| | num_layers = config.num_hidden_layers + 1 |
| | if config.use_weighted_layer_sum: |
| | self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) |
| | self.classifier = nn.Linear(config.hidden_size, config.num_labels) |
| | self.num_labels = config.num_labels |
| |
|
| | self.init_weights() |
| |
|
| | |
| | def freeze_feature_encoder(self): |
| | """ |
| | Calling this function will disable the gradient computation for the feature encoder so that its parameters
| | will not be updated during training.
| | """ |
| | self.wav2vec2_conformer.feature_extractor._freeze_parameters() |
| |
|
| | |
| | def freeze_base_model(self): |
| | """ |
| | Calling this function will disable the gradient computation for the base model so that its parameters will not |
| | be updated during training. Only the classification head will be updated. |
| | """ |
| | for param in self.wav2vec2_conformer.parameters(): |
| | param.requires_grad = False |
| |
|
| | @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING) |
| | @add_code_sample_docstrings( |
| | checkpoint=_CHECKPOINT_FOR_DOC, |
| | output_type=TokenClassifierOutput, |
| | config_class=_CONFIG_FOR_DOC, |
| | modality="audio", |
| | ) |
| | |
| | def forward( |
| | self, |
| | input_values: Optional[torch.Tensor], |
| | attention_mask: Optional[torch.Tensor] = None, |
| | labels: Optional[torch.Tensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, TokenClassifierOutput]: |
| | r""" |
| | labels (`torch.LongTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*):
| | One-hot encoded frame-level labels for computing the classification loss (Cross-Entropy). The class index
| | of each frame is recovered with an `argmax` over the last dimension, and indices should be in `[0, ...,
| | config.num_labels - 1]`.
| | """ |
| |
|
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| | output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states |
| |
|
| | outputs = self.wav2vec2_conformer( |
| | input_values, |
| | attention_mask=attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | if self.config.use_weighted_layer_sum: |
| | hidden_states = outputs[_HIDDEN_STATES_START_POSITION] |
| | hidden_states = torch.stack(hidden_states, dim=1) |
| | norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) |
| | hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) |
| | else: |
| | hidden_states = outputs[0] |
| |
|
| | logits = self.classifier(hidden_states) |
| |
|
| | loss = None |
| | if labels is not None: |
| | loss_fct = CrossEntropyLoss() |
| | loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), dim=1))
| |
|
| | if not return_dict: |
| | output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] |
| | return ((loss,) + output) if loss is not None else output
| |
|
| | return TokenClassifierOutput( |
| | loss=loss, |
| | logits=logits, |
| | hidden_states=outputs.hidden_states, |
| | attentions=outputs.attentions, |
| | ) |
| |
|
| |
|
| | |
| | class AMSoftmaxLoss(nn.Module): |
| | def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): |
| | super().__init__()
| | self.scale = scale |
| | self.margin = margin |
| | self.num_labels = num_labels |
| | self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) |
| | self.loss = nn.CrossEntropyLoss() |
| |
|
| | def forward(self, hidden_states, labels): |
| | labels = labels.flatten() |
| | weight = nn.functional.normalize(self.weight, dim=0) |
| | hidden_states = nn.functional.normalize(hidden_states, dim=1) |
| | cos_theta = torch.mm(hidden_states, weight) |
| | psi = cos_theta - self.margin |
| |
|
| | onehot = nn.functional.one_hot(labels, self.num_labels) |
| | logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) |
| | loss = self.loss(logits, labels) |
| |
|
| | return loss |
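| | # Toy example (hedged) of the additive margin above: the true class keeps
| | # cos_theta - margin while other classes keep cos_theta, all scaled by `scale`:
| | #
| | #     criterion = AMSoftmaxLoss(input_dim=8, num_labels=3)
| | #     feats = torch.randn(4, 8)             # (batch, input_dim) pooled embeddings
| | #     speaker_ids = torch.tensor([0, 2, 1, 0])
| | #     loss = criterion(feats, speaker_ids)  # scalar cross-entropy over scaled logits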
| |
|
| |
|
| | |
| | class TDNNLayer(nn.Module): |
| | def __init__(self, config, layer_id=0): |
| | super().__init__() |
| | self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id] |
| | self.out_conv_dim = config.tdnn_dim[layer_id] |
| | self.kernel_size = config.tdnn_kernel[layer_id] |
| | self.dilation = config.tdnn_dilation[layer_id] |
| |
|
| | self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim) |
| | self.activation = nn.ReLU() |
| |
|
| | def forward(self, hidden_states): |
| | hidden_states = hidden_states.unsqueeze(1) |
| | hidden_states = nn.functional.unfold( |
| | hidden_states, |
| | (self.kernel_size, self.in_conv_dim), |
| | stride=(1, self.in_conv_dim), |
| | dilation=(self.dilation, 1), |
| | ) |
| | hidden_states = hidden_states.transpose(1, 2) |
| | hidden_states = self.kernel(hidden_states) |
| |
|
| | hidden_states = self.activation(hidden_states) |
| | return hidden_states |
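| | # Shape sketch (hedged): `unfold` turns the dilated 1-D convolution into a
| | # matrix multiply with `self.kernel`:
| | #   (B, T, C_in) -> unsqueeze -> (B, 1, T, C_in)
| | #   -> unfold                 -> (B, C_in * kernel_size, T_out)
| | #   -> transpose + linear     -> (B, T_out, C_out)
| | # with T_out = T - dilation * (kernel_size - 1).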
| |
|
| |
|
| | @add_start_docstrings( |
| | """ |
| | Wav2Vec2Conformer Model with an XVector feature extraction head on top for tasks like Speaker Verification. |
| | """, |
| | WAV2VEC2_CONFORMER_START_DOCSTRING, |
| | ) |
| | class Wav2Vec2ConformerForXVector(Wav2Vec2ConformerPreTrainedModel): |
| | def __init__(self, config): |
| | super().__init__(config) |
| |
|
| | self.wav2vec2_conformer = Wav2Vec2ConformerModel(config) |
| | num_layers = config.num_hidden_layers + 1 |
| | if config.use_weighted_layer_sum: |
| | self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) |
| | self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) |
| |
|
| | tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] |
| | self.tdnn = nn.ModuleList(tdnn_layers) |
| |
|
| | self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) |
| | self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) |
| |
|
| | self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) |
| |
|
| | self.init_weights() |
| |
|
| | |
| | def freeze_feature_encoder(self): |
| | """ |
| | Calling this function will disable the gradient computation for the feature encoder so that its parameters
| | will not be updated during training.
| | """ |
| | self.wav2vec2_conformer.feature_extractor._freeze_parameters() |
| |
|
| | |
| | def freeze_base_model(self): |
| | """ |
| | Calling this function will disable the gradient computation for the base model so that its parameters will not |
| | be updated during training. Only the classification head will be updated. |
| | """ |
| | for param in self.wav2vec2_conformer.parameters(): |
| | param.requires_grad = False |
| |
|
| | |
| | def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): |
| | """ |
| | Computes the output length of the TDNN layers |
| | """ |
| |
|
| | def _conv_out_length(input_length, kernel_size, stride): |
| | # 1D convolutional layer output length formula taken
| | # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
| | return (input_length - kernel_size) // stride + 1 |
| |
|
| | for kernel_size in self.config.tdnn_kernel: |
| | input_lengths = _conv_out_length(input_lengths, kernel_size, 1) |
| |
|
| | return input_lengths |
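| | # Worked example (hedged): with tdnn_kernel = (5, 3, 3, 1, 1) and 100 input
| | # frames, the lengths shrink as 100 -> 96 -> 94 -> 92 -> 92 -> 92 (stride 1;
| | # note the formula above does not account for `tdnn_dilation`).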
| |
|
| | @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING) |
| | @add_code_sample_docstrings( |
| | checkpoint=_CHECKPOINT_FOR_DOC, |
| | output_type=XVectorOutput, |
| | config_class=_CONFIG_FOR_DOC, |
| | modality="audio", |
| | ) |
| | |
| | def forward( |
| | self, |
| | input_values: Optional[torch.Tensor], |
| | attention_mask: Optional[torch.Tensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | labels: Optional[torch.Tensor] = None, |
| | ) -> Union[Tuple, XVectorOutput]: |
| | r""" |
| | labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| | Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
| | config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Square loss); if
| | `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
| | """ |
| |
|
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| | output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states |
| |
|
| | outputs = self.wav2vec2_conformer( |
| | input_values, |
| | attention_mask=attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | if self.config.use_weighted_layer_sum: |
| | hidden_states = outputs[_HIDDEN_STATES_START_POSITION] |
| | hidden_states = torch.stack(hidden_states, dim=1) |
| | norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) |
| | hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) |
| | else: |
| | hidden_states = outputs[0] |
| |
|
| | hidden_states = self.projector(hidden_states) |
| |
|
| | for tdnn_layer in self.tdnn: |
| | hidden_states = tdnn_layer(hidden_states) |
| |
|
| | # Statistic Pooling
| | if attention_mask is None: |
| | mean_features = hidden_states.mean(dim=1) |
| | std_features = hidden_states.std(dim=1) |
| | else: |
| | feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) |
| | tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) |
| | mean_features = [] |
| | std_features = [] |
| | for i, length in enumerate(tdnn_output_lengths): |
| | mean_features.append(hidden_states[i, :length].mean(dim=0)) |
| | std_features.append(hidden_states[i, :length].std(dim=0)) |
| | mean_features = torch.stack(mean_features) |
| | std_features = torch.stack(std_features) |
| | statistic_pooling = torch.cat([mean_features, std_features], dim=-1) |
| |
|
| | output_embeddings = self.feature_extractor(statistic_pooling) |
| | logits = self.classifier(output_embeddings) |
| |
|
| | loss = None |
| | if labels is not None: |
| | loss = self.objective(logits, labels) |
| |
|
| | if not return_dict: |
| | output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] |
| | return ((loss,) + output) if loss is not None else output |
| |
|
| | return XVectorOutput( |
| | loss=loss, |
| | logits=logits, |
| | embeddings=output_embeddings, |
| | hidden_states=outputs.hidden_states, |
| | attentions=outputs.attentions, |
| | ) |
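| | # Hedged usage sketch for speaker verification (the 0.7 threshold is an
| | # assumption to be tuned on held-out data):
| | #
| | #     emb1 = model(input_values_1).embeddings
| | #     emb2 = model(input_values_2).embeddings
| | #     emb1 = nn.functional.normalize(emb1, dim=-1)
| | #     emb2 = nn.functional.normalize(emb2, dim=-1)
| | #     same_speaker = torch.cosine_similarity(emb1, emb2, dim=-1) > 0.7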
| |
|