| text (string) | class_index (int64) | source (string) |
|---|---|---|
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
if isinstance(past_key_value, StaticCache):
raise ValueError(
"`static` cache implementation is not compatible with `attn_implementation==flash_attention_2`. "
"Make sure to use `sdpa` in the meantime, and open an issue at https://github.com/huggingface/transformers."
)
output_attentions = False
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states) | 3,688 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# Flash attention requires the input to have the shape
# batch_size x seq_length x num_heads x head_dim
# therefore we just need to reshape the projections accordingly
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = self.rotary_emb(value_states, position_ids)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) | 3,688 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
# to be able to avoid many of these transpose/reshape/view operations.
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
# In PEFT, the layer norms are usually cast to float32 for training stability reasons,
# so the input hidden states may get silently cast to float32. Hence, we need to
# cast them back to the correct dtype just to be sure everything works as expected.
# This might slow down training & inference, so it is recommended not to cast the LayerNorms
# to fp32. (MimiRMSNorm handles it correctly) | 3,688 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
input_dtype = query_states.dtype
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(
f"The input hidden states seem to have been silently cast to float32; this is probably because"
f" you have upcast embedding or layer norm layers to float32. We will cast the input back to"
f" {target_dtype}."
)
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype) | 3,688 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
q_len,
position_ids=position_ids,
dropout=dropout_rate,
sliding_window=getattr(self, "sliding_window", None),
is_causal=self.is_causal,
use_top_left_mask=self._flash_attn_uses_top_left_mask,
)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value | 3,688 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
class MimiSdpaAttention(MimiAttention):
"""
Mimi attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`MimiAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
SDPA API.
""" | 3,689 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# Adapted from MimiAttention.forward
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
if output_attentions:
# TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
logger.warning_once(
"MimiModel is using MimiSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " | 3,689 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
return super().forward(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
) | 3,689 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = self.rotary_emb(value_states, position_ids)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) | 3,689 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
# Reference: https://github.com/pytorch/pytorch/issues/112577.
if query_states.device.type == "cuda" and causal_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous() | 3,689 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
is_causal = True if causal_mask is None and q_len > 1 else False
attn_output = torch.nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=causal_mask,
dropout_p=self.attention_dropout if self.training else 0.0,
is_causal=is_causal,
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return attn_output, None, past_key_value | 3,689 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
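# Illustrative sketch (toy tensor sizes, not part of the model above): the `is_causal`
# dispatch used in MimiSdpaAttention.forward. When no explicit mask is passed and q_len > 1,
# `is_causal=True` is equivalent to supplying an additive lower-triangular causal mask.
import torch
import torch.nn.functional as F

q = torch.randn(1, 2, 4, 8)  # (batch, num_heads, q_len, head_dim)
k = torch.randn(1, 2, 4, 8)
v = torch.randn(1, 2, 4, 8)

# Explicit additive causal mask, analogous to the `causal_mask` built by the model:
# -inf above the diagonal, 0 elsewhere.
mask = torch.full((4, 4), float("-inf")).triu(diagonal=1)

out_flag = F.scaled_dot_product_attention(q, k, v, is_causal=True)
out_mask = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)
assert torch.allclose(out_flag, out_mask, atol=1e-6)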
class MimiTransformerLayer(nn.Module):
def __init__(self, config: MimiConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MIMI_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
self.mlp = MimiMLP(config)
self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
self.self_attn_layer_scale = MimiLayerScale(config)
self.mlp_layer_scale = MimiLayerScale(config) | 3,690 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*): | 3,690 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
into the model
"""
residual = hidden_states | 3,690 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual + self.self_attn_layer_scale(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.mlp_layer_scale(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs | 3,690 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
class MimiTransformerModel(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MimiTransformerLayer`]
Args:
config: MimiConfig
"""
def __init__(self, config: MimiConfig):
super().__init__()
self.layers = nn.ModuleList(
[MimiTransformerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self._attn_implementation = config._attn_implementation
self.gradient_checkpointing = False
self.config = config | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def forward(
self,
hidden_states: torch.FloatTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Embedded representation that will be contextualized by the model
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy. | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.n_positions - 1]`.
[What are position IDs?](../glossary#position-ids)
past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
Two formats are allowed:
- a [`~cache_utils.Cache`] instance;
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. This is also known as the legacy
cache format.
The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
legacy cache format will be returned. | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*): | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if use_cache and not isinstance(past_key_values, Cache):
if past_key_values is None:
past_key_values = DynamicCache()
else:
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
logger.warning_once(
"We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
"will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
"(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
) | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + hidden_states.shape[1], device=hidden_states.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = None
if attention_mask is not None:
causal_mask = self._update_causal_mask(
attention_mask, hidden_states, cache_position, past_key_values, output_attentions
)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,) | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(
decoder_layer.__call__,
hidden_states,
causal_mask,
position_ids,
past_key_values,
output_attentions,
use_cache,
cache_position,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache = layer_outputs[2 if output_attentions else 1] | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if output_attentions:
all_self_attns += (layer_outputs[1],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
) | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# Copied from transformers.models.phi3.modeling_phi3.Phi3Model._update_causal_mask with Phi3->Mimi
def _update_causal_mask(
self,
attention_mask: torch.Tensor,
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and past_key_values is not None:
is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
if is_padding_right:
raise ValueError(
"You are attempting to perform batched generation with padding_side='right', which"
" may lead to unexpected behaviour with the Flash Attention version of Mimi. Make sure to"
" call `tokenizer.padding_side = 'left'` before tokenizing the input."
) | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_static_cache = isinstance(past_key_values, StaticCache)
using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache) | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if (
self.config._attn_implementation == "sdpa"
and not (using_static_cache or using_sliding_window_cache)
and not output_attentions
):
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
sliding_window=self.config.sliding_window,
is_training=self.training,
):
return None | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
dtype, device = input_tensor.dtype, input_tensor.device
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
# SlidingWindowCache or StaticCache
if using_sliding_window_cache or using_static_cache:
target_length = past_key_values.get_max_cache_shape()
# DynamicCache or no cache
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
) | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
device=device,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
config=self.config,
past_key_values=past_key_values,
) | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type == "cuda"
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
@staticmethod
# Copied from transformers.models.mistral.modeling_mistral.MistralModel._prepare_4d_causal_attention_mask_with_cache_position with Mistral->Mimi
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
device: torch.device,
cache_position: torch.Tensor,
batch_size: int,
config: MimiConfig,
past_key_values: Cache,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
device (`torch.device`):
The device to place the 4D attention mask on.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
config (`MimiConfig`):
The model's configuration class | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
past_key_values (`Cache`):
The cache class that is being used currently to generate
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
)
diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
if config.sliding_window is not None:
# if we have a sliding window, we should not attend to tokens beyond the sliding window length, so we mask them out as well
# the check is needed to verify whether the current checkpoint was trained with a sliding window or not | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
sliding_attend_mask = torch.arange(target_length, device=device) <= (
cache_position.reshape(-1, 1) - config.sliding_window
)
diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
causal_mask *= diagonal_attend_mask
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
if attention_mask.shape[-1] > target_length:
attention_mask = attention_mask[:, :target_length]
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
padding_mask = padding_mask == 0 | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask | 3,691 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
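# Condensed, illustrative sketch (toy sizes, no sliding window, no cache; not taken from the
# file above) of the 2D -> 4D mask construction performed by
# `_prepare_4d_causal_attention_mask_with_cache_position`: future positions and padded positions
# both end up holding the large negative value `min_dtype`, while attended positions hold 0.
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
sequence_length = target_length = 3
cache_position = torch.arange(sequence_length)

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
diagonal_attend_mask = torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask *= diagonal_attend_mask  # keep min_dtype only above the diagonal (future tokens)
causal_mask = causal_mask[None, None, :, :].expand(1, 1, -1, -1).clone()

# 2D padding mask: the last token of the (single) toy sequence is padding.
attention_mask = torch.tensor([[1, 1, 0]])
# Positions that are allowed by the causal mask (value 0) but padded (mask value 0) sum to 0.
padding_mask = (causal_mask[:, :, :, :3] + attention_mask[:, None, None, :]) == 0
causal_mask[:, :, :, :3] = causal_mask[:, :, :, :3].masked_fill(padding_mask, min_dtype)
# causal_mask is now (1, 1, 3, 3): 0 where attention is allowed, min_dtype elsewhere.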
class MimiDecoder(nn.Module):
"""SEANet decoder as used by Mimi."""
def __init__(self, config: MimiConfig):
super().__init__()
scaling = int(2 ** len(config.upsampling_ratios))
model = [MimiConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)]
# Upsample to raw audio scale
for ratio in config.upsampling_ratios:
current_scale = scaling * config.num_filters
# Add upsampling layers
model += [nn.ELU()]
model += [
MimiConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio)
]
# Add residual layers
for j in range(config.num_residual_layers):
model += [MimiResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))]
scaling //= 2 | 3,692 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# Add final layers
model += [nn.ELU()]
model += [MimiConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)]
self.layers = nn.ModuleList(model)
# Copied from transformers.models.encodec.modeling_encodec.EncodecDecoder.forward
def forward(self, hidden_states):
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states | 3,692 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
class MimiEuclideanCodebook(nn.Module):
"""Codebook with Euclidean distance."""
def __init__(self, config: MimiConfig, epsilon: float = 1e-5):
super().__init__()
embed = torch.zeros(config.codebook_size, config.codebook_dim)
self.codebook_size = config.codebook_size
self.register_buffer("initialized", torch.Tensor([True]))
self.register_buffer("cluster_usage", torch.ones(config.codebook_size))
self.register_buffer("embed_sum", embed)
self._embed = None
self.epsilon = epsilon
@property
def embed(self) -> torch.Tensor:
if self._embed is None:
self._embed = self.embed_sum / self.cluster_usage.clamp(min=self.epsilon)[:, None]
return self._embed | 3,693 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def quantize(self, hidden_states):
# Projects each vector in `hidden_states` onto the nearest centroid and returns its index.
# `hidden_states` should be `[N, D]` with `N` the number of input vectors and `D` the dimension.
dists = torch.cdist(hidden_states[None], self.embed[None], p=2)[0]
embed_ind = dists.argmin(dim=-1)
return embed_ind
# Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.encode
def encode(self, hidden_states):
shape = hidden_states.shape
# pre-process
hidden_states = hidden_states.reshape((-1, shape[-1]))
# quantize
embed_ind = self.quantize(hidden_states)
# post-process
embed_ind = embed_ind.view(*shape[:-1])
return embed_ind
# Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.decode
def decode(self, embed_ind):
quantize = nn.functional.embedding(embed_ind, self.embed)
return quantize | 3,693 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
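# Toy illustration (hypothetical values, not the model's own API) of the Euclidean codebook
# lookup used by `MimiEuclideanCodebook.quantize` and `decode`: each input vector is mapped to
# the index of its nearest codebook entry, and decoding is a plain embedding lookup.
import torch
import torch.nn as nn

codebook = torch.tensor([[0.0, 0.0], [1.0, 1.0], [-1.0, 1.0]])  # (codebook_size=3, dim=2)
vectors = torch.tensor([[0.9, 1.1], [0.1, -0.2]])               # (N=2, dim=2)

dists = torch.cdist(vectors[None], codebook[None], p=2)[0]      # (N, codebook_size)
indices = dists.argmin(dim=-1)                                  # tensor([1, 0])
quantized = nn.functional.embedding(indices, codebook)          # nearest centroid per vector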
class MimiVectorQuantization(nn.Module):
"""
Vector quantization implementation. Currently supports only euclidean distance.
"""
def __init__(self, config: MimiConfig):
super().__init__()
self.codebook = MimiEuclideanCodebook(config)
def encode(self, hidden_states):
hidden_states = hidden_states.permute(0, 2, 1)
embed_in = self.codebook.encode(hidden_states)
return embed_in
def decode(self, embed_ind):
quantize = self.codebook.decode(embed_ind)
quantize = quantize.permute(0, 2, 1)
return quantize | 3,694 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
class MimiResidualVectorQuantizer(nn.Module):
"""Residual Vector Quantizer."""
def __init__(self, config: MimiConfig, num_quantizers: int = None):
super().__init__()
self.codebook_size = config.codebook_size
self.frame_rate = config.frame_rate
self.num_quantizers = num_quantizers if num_quantizers is not None else config.num_quantizers
self.layers = nn.ModuleList([MimiVectorQuantization(config) for _ in range(self.num_quantizers)])
self.input_proj = None
self.output_proj = None
if config.vector_quantization_hidden_dimension != config.hidden_size:
self.input_proj = torch.nn.Conv1d(
config.hidden_size, config.vector_quantization_hidden_dimension, 1, bias=False
)
self.output_proj = torch.nn.Conv1d(
config.vector_quantization_hidden_dimension, config.hidden_size, 1, bias=False
) | 3,695 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def encode(self, embeddings: torch.Tensor, num_quantizers: Optional[int] = None) -> torch.Tensor:
"""
Encode a given input tensor with the specified frame rate at the given number of quantizers / codebooks. The RVQ encode method sets
the appropriate number of quantizers to use and returns indices for each quantizer.
"""
if self.input_proj is not None:
embeddings = self.input_proj(embeddings)
num_quantizers = num_quantizers if num_quantizers is not None else self.num_quantizers
residual = embeddings
all_indices = []
for layer in self.layers[:num_quantizers]:
indices = layer.encode(residual)
quantized = layer.decode(indices)
residual = residual - quantized
all_indices.append(indices)
out_indices = torch.stack(all_indices)
return out_indices | 3,695 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes of shape [B, K, T] to the quantized representation."""
quantized_out = torch.tensor(0.0, device=codes.device)
codes = codes.transpose(0, 1)
for i, indices in enumerate(codes):
layer = self.layers[i]
quantized = layer.decode(indices)
quantized_out = quantized_out + quantized
if self.output_proj is not None:
quantized_out = self.output_proj(quantized_out)
return quantized_out | 3,695 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
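# Minimal sketch (toy helper and sizes, not the classes above) of the residual vector
# quantization loop implemented by `MimiResidualVectorQuantizer.encode`/`decode`: each stage
# quantizes the residual left over by the previous stages, and decoding sums all stage outputs.
import torch

def nearest(codebook: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Return the nearest codebook entry for each row of `x`."""
    idx = torch.cdist(x, codebook).argmin(dim=-1)
    return codebook[idx]

codebooks = [torch.randn(16, 4) for _ in range(3)]  # 3 quantizer stages, dim 4
x = torch.randn(5, 4)

residual, quantized = x, torch.zeros_like(x)
for codebook in codebooks:
    stage = nearest(codebook, residual)  # quantize what is left to explain
    quantized = quantized + stage        # decode: sum of all stage outputs
    residual = residual - stage          # encode: pass the residual to the next stage
# With more stages, `quantized` approximates `x` increasingly well.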
class MimiSplitResidualVectorQuantizer(nn.Module):
"""Split Residual Vector Quantizer."""
def __init__(self, config: MimiConfig):
super().__init__()
self.codebook_size = config.codebook_size
self.frame_rate = config.frame_rate
self.max_num_quantizers = config.num_quantizers
self.num_semantic_quantizers = config.num_semantic_quantizers
self.num_acoustic_quantizers = config.num_quantizers - config.num_semantic_quantizers
self.semantic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_semantic_quantizers)
self.acoustic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_acoustic_quantizers) | 3,696 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def encode(self, embeddings: torch.Tensor, num_quantizers: Optional[float] = None) -> torch.Tensor:
"""
Encode a given input tensor with the specified frame rate at the given number of quantizers / codebooks. The RVQ encode method sets
the appropriate number of quantizers to use and returns indices for each quantizer.
"""
num_quantizers = self.max_num_quantizers if num_quantizers is None else num_quantizers
if num_quantizers > self.max_num_quantizers:
raise ValueError(
f"The number of quantizers (i.e. codebooks) requested should be at most the total number of quantizers {self.max_num_quantizers}, but is currently {num_quantizers}."
) | 3,696 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if num_quantizers < self.num_semantic_quantizers:
raise ValueError(
f"The number of quantizers (i.e. codebooks) requested should be at least the number of semantic quantizers {self.num_semantic_quantizers}, but is currently {num_quantizers}."
)
# codes is [K, B, T], with T frames, K nb of codebooks.
codes = self.semantic_residual_vector_quantizer.encode(embeddings)
if num_quantizers > self.num_semantic_quantizers:
acoustic_codes = self.acoustic_residual_vector_quantizer.encode(
embeddings, num_quantizers=num_quantizers - self.num_semantic_quantizers
)
codes = torch.cat([codes, acoustic_codes], dim=0)
return codes
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to the quantized representation.""" | 3,696 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# The first num_semantic_quantizers codebooks are decoded using the semantic RVQ
quantized_out = self.semantic_residual_vector_quantizer.decode(codes[:, : self.num_semantic_quantizers])
# The rest of the codebooks are decoded using the acoustic RVQ
if codes.shape[1] > self.num_semantic_quantizers:
quantized_out += self.acoustic_residual_vector_quantizer.decode(codes[:, self.num_semantic_quantizers :])
return quantized_out | 3,696 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
class MimiPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = MimiConfig
base_model_prefix = "mimi"
main_input_name = "input_values"
supports_gradient_checkpointing = True
_no_split_modules = ["MimiDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn_2 = True
_supports_sdpa = True
_supports_cache_class = True
_supports_static_cache = True | 3,697 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# Copied from transformers.models.encodec.modeling_encodec.EncodecPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None: | 3,697 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LSTM):
for name, param in module.named_parameters():
if "weight" in name:
nn.init.xavier_uniform_(param)
elif "bias" in name:
nn.init.constant_(param, 0.0) | 3,697 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
class MimiModel(MimiPreTrainedModel):
def __init__(self, config: MimiConfig):
super().__init__(config)
self.config = config
self.encoder = MimiEncoder(config)
self.encoder_transformer = MimiTransformerModel(config)
self.downsample = None
self.upsample = None
if config.frame_rate != config.encodec_frame_rate:
self.downsample = MimiConv1d(
config,
config.hidden_size,
config.hidden_size,
kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate),
stride=2,
bias=False,
pad_mode="replicate",
) | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
self.upsample = MimiConvTranspose1d(
config,
config.hidden_size,
config.hidden_size,
kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate),
stride=2,
bias=False,
groups=config.upsample_groups,
)
self.decoder_transformer = MimiTransformerModel(config)
self.decoder = MimiDecoder(config)
self.quantizer = MimiSplitResidualVectorQuantizer(config)
self.bits_per_codebook = int(math.log2(self.config.codebook_size))
if 2**self.bits_per_codebook != self.config.codebook_size:
raise ValueError("The codebook_size must be a power of 2.")
# Initialize weights and apply final processing
self.post_init()
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def _encode_frame(
self,
input_values: torch.Tensor,
num_quantizers: int,
padding_mask: torch.Tensor,
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
return_dict: Optional[bool] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Encodes the given input using the underlying VQVAE. The padding mask is required to compute the correct scale.
"""
embeddings = self.encoder(input_values)
encoder_outputs = self.encoder_transformer(
embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict
)
if return_dict:
past_key_values = encoder_outputs.get("past_key_values")
elif len(encoder_outputs) > 1:
past_key_values = encoder_outputs[1]
embeddings = encoder_outputs[0].transpose(1, 2)
embeddings = self.downsample(embeddings) | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
codes = self.quantizer.encode(embeddings, num_quantizers)
codes = codes.transpose(0, 1)
return codes, past_key_values
def encode(
self,
input_values: torch.Tensor,
padding_mask: torch.Tensor = None,
num_quantizers: Optional[float] = None,
encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], MimiEncoderOutput]:
"""
Encodes the input audio waveform into discrete codes. | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
Args:
input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Float values of the input audio waveform.
padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*.
num_quantizers (`int`, *optional*):
Number of quantizers (i.e codebooks) to use. By default, all quantizers are used.
encoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
The model will output the same cache format that is fed as input.
If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
have their past key value states given to this model).
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
`codebook` of shape `[batch_size, num_codebooks, frames]`, the discrete encoded codes for the input audio waveform.
"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
num_quantizers = self.config.num_quantizers if num_quantizers is None else num_quantizers | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if num_quantizers > self.config.num_quantizers:
raise ValueError(
f"The number of quantizers (i.e. codebooks) requested should be at most the total number of quantizers {self.config.num_quantizers}, but is currently {num_quantizers}."
)
_, channels, input_length = input_values.shape
if channels < 1 or channels > 2:
raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}")
if padding_mask is None:
padding_mask = torch.ones_like(input_values).bool()
encoded_frames, encoder_past_key_values = self._encode_frame(
input_values,
num_quantizers,
padding_mask.bool(),
past_key_values=encoder_past_key_values,
return_dict=return_dict,
)
if not return_dict:
return (
encoded_frames,
encoder_past_key_values,
) | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
return MimiEncoderOutput(encoded_frames, encoder_past_key_values)
def _decode_frame(
self,
codes: torch.Tensor,
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
return_dict: Optional[bool] = None,
) -> torch.Tensor:
embeddings = self.quantizer.decode(codes)
embeddings = self.upsample(embeddings)
decoder_outputs = self.decoder_transformer(
embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict
)
if return_dict:
past_key_values = decoder_outputs.get("past_key_values")
elif len(decoder_outputs) > 1:
past_key_values = decoder_outputs[1]
embeddings = decoder_outputs[0].transpose(1, 2)
outputs = self.decoder(embeddings)
return outputs, past_key_values | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def decode(
self,
audio_codes: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor, torch.Tensor], MimiDecoderOutput]:
"""
Decodes the given frames into an output audio waveform.
Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be
trimmed. | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
Args:
audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
Discrete code embeddings computed using `model.encode`.
padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*.
decoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
The model will output the same cache format that is fed as input. | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
have their past key value states given to this model).
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
audio_values, decoder_past_key_values = self._decode_frame(
audio_codes, past_key_values=decoder_past_key_values, return_dict=return_dict
)
# truncate based on padding mask
if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]:
audio_values = audio_values[..., : padding_mask.shape[-1]] | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if not return_dict:
return (
audio_values,
decoder_past_key_values,
)
return MimiDecoderOutput(audio_values, decoder_past_key_values)
@add_start_docstrings_to_model_forward(MIMI_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=MimiOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_values: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
num_quantizers: Optional[int] = None,
audio_codes: Optional[torch.Tensor] = None,
encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor, torch.Tensor], MimiOutput]:
r"""
Returns:
Examples: | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
```python
>>> from datasets import load_dataset
>>> from transformers import AutoFeatureExtractor, MimiModel
>>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
>>> audio_sample = dataset["train"]["audio"][0]["array"]
>>> model_id = "kyutai/mimi"
>>> model = MimiModel.from_pretrained(model_id)
>>> feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
>>> inputs = feature_extractor(raw_audio=audio_sample, return_tensors="pt")
>>> outputs = model(**inputs)
>>> audio_codes = outputs.audio_codes
>>> audio_values = outputs.audio_values
```"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
if padding_mask is None:
padding_mask = torch.ones_like(input_values).bool() | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if audio_codes is None:
encoder_outputs = self.encode(
input_values, padding_mask, num_quantizers, encoder_past_key_values, return_dict=return_dict
)
audio_codes = encoder_outputs[0]
if return_dict:
encoder_past_key_values = encoder_outputs.get("past_key_values")
elif len(encoder_outputs) > 1:
encoder_past_key_values = encoder_outputs[1]
decoder_outputs = self.decode(audio_codes, padding_mask, decoder_past_key_values, return_dict=return_dict)
audio_values = decoder_outputs[0]
if return_dict:
decoder_past_key_values = decoder_outputs.get("past_key_values")
elif len(decoder_outputs) > 1:
decoder_past_key_values = decoder_outputs[1]
if not return_dict:
return (audio_codes, audio_values, encoder_past_key_values, decoder_past_key_values) | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
return MimiOutput(
audio_codes=audio_codes,
audio_values=audio_values,
encoder_past_key_values=encoder_past_key_values,
decoder_past_key_values=decoder_past_key_values,
) | 3,698 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
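# Hedged usage sketch built from the example in `MimiModel.forward` above: running the encoder
# and decoder separately via `model.encode` / `model.decode`. The `audio_codes` / `audio_values`
# attribute names on the returned objects are assumed from `MimiEncoderOutput` /
# `MimiDecoderOutput`, which this file constructs but does not define in this excerpt.
from datasets import load_dataset
from transformers import AutoFeatureExtractor, MimiModel

dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
audio_sample = dataset["train"]["audio"][0]["array"]

model = MimiModel.from_pretrained("kyutai/mimi")
feature_extractor = AutoFeatureExtractor.from_pretrained("kyutai/mimi")
inputs = feature_extractor(raw_audio=audio_sample, return_tensors="pt")

encoder_outputs = model.encode(inputs["input_values"])
audio_codes = encoder_outputs.audio_codes        # (batch_size, num_quantizers, frames)
decoder_outputs = model.decode(audio_codes)
audio_values = decoder_outputs.audio_values      # reconstructed waveform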
class WavLMConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`WavLMModel`]. It is used to instantiate a WavLM
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the WavLM
[microsoft/wavlm-base](https://huggingface.co/microsoft/wavlm-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information. | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
Args:
vocab_size (`int`, *optional*, defaults to 32):
Vocabulary size of the WavLM model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`WavLMModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for activations inside the fully connected layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
final_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the final projection layer of [`WavLMForCTC`].
layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
details.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
feat_extract_norm (`str`, *optional*, defaults to `"group"`):
The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
convolutional layers.
feat_proj_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for output of the feature encoder.
feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the 1D convolutional layers of the feature | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
length of *conv_kernel* defines the number of convolutional layers and has to match the length of
*conv_dim*.
conv_bias (`bool`, *optional*, defaults to `False`):
Whether the 1D convolutional layers have a bias.
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer.
do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
False` corresponds to applying layer norm after the attention layer. | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
apply_spec_augment (`bool`, *optional*, defaults to `True`):
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://arxiv.org/abs/1904.08779).
mask_time_prob (`float`, *optional*, defaults to 0.05):
Probability of each feature vector along the time axis to be chosen as the start of the vector span to be
masked. Approximately `mask_time_prob * sequence_length // mask_time_length` feature vectors will be masked
along the time axis. This is only relevant if `apply_spec_augment is True`.
mask_time_length (`int`, *optional*, defaults to 10):
Length of vector span along the time axis.
mask_time_min_masks (`int`, *optional*, defaults to 2):
The minimum number of masks of length `mask_time_length` generated along the time axis, each time step, | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
irrespective of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
mask_time_min_masks`.
mask_feature_prob (`float`, *optional*, defaults to 0.0):
Probability of each feature vector along the feature axis to be chosen as the start of the vector span to
be masked. Approximately `mask_feature_prob * hidden_size // mask_feature_length` feature vectors will be
masked along the feature axis. This is only relevant if `apply_spec_augment is True`.
mask_feature_length (`int`, *optional*, defaults to 10):
Length of vector span along the feature axis.
num_codevectors_per_group (`int`, *optional*, defaults to 320):
Number of entries in each quantization codebook (group).
num_codevector_groups (`int`, *optional*, defaults to 2):
Number of codevector groups for product codevector quantization. | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
The temperature *kappa* in the contrastive loss.
num_negatives (`int`, *optional*, defaults to 100):
Number of negative samples for the contrastive loss.
codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the quantized feature vectors.
proj_codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the final projection of both the quantized and the transformer features.
diversity_loss_weight (`float`, *optional*, defaults to 0.1):
The weight of the codebook diversity loss component.
ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`WavLMForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `False`): | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`WavLMForCTC`].
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
instance of [`WavLMForSequenceClassification`].
classifier_proj_size (`int`, *optional*, defaults to 256):
Dimensionality of the projection before token mean-pooling for classification.
tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers. | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
*XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
*XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
xvector_output_dim (`int`, *optional*, defaults to 512):
Dimensionality of the *XVector* embedding vectors.
add_adapter (`bool`, *optional*, defaults to `False`):
Whether a convolutional network should be stacked on top of the WavLM encoder. Can be very useful for
warm-starting WavLM for SpeechEncoderDecoder models.
adapter_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adapter_stride (`int`, *optional*, defaults to 2):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
num_adapter_layers (`int`, *optional*, defaults to 3):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
output_hidden_size (`int`, *optional*):
Dimensionality of the encoder output layer. If not defined, this defaults to `hidden_size`. Only relevant
if `add_adapter is True`. | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
Example:
```python
>>> from transformers import WavLMConfig, WavLMModel
>>> # Initializing a WavLM facebook/wavlm-base-960h style configuration
>>> configuration = WavLMConfig()
>>> # Initializing a model (with random weights) from the facebook/wavlm-base-960h style configuration
>>> model = WavLMModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "wavlm" | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
def __init__(
self,
vocab_size=32,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout=0.1,
activation_dropout=0.1,
attention_dropout=0.1,
feat_proj_dropout=0.0,
final_dropout=0.1,
layerdrop=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
feat_extract_norm="group",
feat_extract_activation="gelu",
conv_dim=(512, 512, 512, 512, 512, 512, 512),
conv_stride=(5, 2, 2, 2, 2, 2, 2),
conv_kernel=(10, 3, 3, 3, 3, 2, 2),
conv_bias=False,
num_conv_pos_embeddings=128,
num_conv_pos_embedding_groups=16,
num_buckets=320,
max_bucket_distance=800,
do_stable_layer_norm=False,
apply_spec_augment=True,
mask_time_prob=0.05,
mask_time_length=10,
mask_time_min_masks=2,
mask_feature_prob=0.0, | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
mask_feature_length=10,
num_codevectors_per_group=320,
num_codevector_groups=2,
contrastive_logits_temperature=0.1,
num_negatives=100,
codevector_dim=256,
proj_codevector_dim=256,
diversity_loss_weight=0.1,
ctc_loss_reduction="mean",
ctc_zero_infinity=False,
use_weighted_layer_sum=False,
classifier_proj_size=256,
tdnn_dim=(512, 512, 512, 512, 1500),
tdnn_kernel=(5, 3, 3, 1, 1),
tdnn_dilation=(1, 2, 3, 1, 1),
xvector_output_dim=512,
num_ctc_classes=80,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
add_adapter=False,
adapter_kernel_size=3,
adapter_stride=2,
num_adapter_layers=3,
output_hidden_size=None,
**kwargs,
):
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
self.hidden_size = hidden_size | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim)
self.conv_stride = list(conv_stride)
self.conv_kernel = list(conv_kernel)
self.conv_bias = conv_bias
self.num_buckets = num_buckets
self.max_bucket_distance = max_bucket_distance
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim)
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.num_ctc_classes = num_ctc_classes
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.use_weighted_layer_sum = use_weighted_layer_sum
self.classifier_proj_size = classifier_proj_size | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
) | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
# parameters for pretraining with codevector quantized representations
self.num_codevectors_per_group = num_codevectors_per_group
self.num_codevector_groups = num_codevector_groups
self.contrastive_logits_temperature = contrastive_logits_temperature
self.num_negatives = num_negatives
self.codevector_dim = codevector_dim
self.proj_codevector_dim = proj_codevector_dim
self.diversity_loss_weight = diversity_loss_weight
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
# adapter
self.add_adapter = add_adapter
self.adapter_kernel_size = adapter_kernel_size
self.adapter_stride = adapter_stride
self.num_adapter_layers = num_adapter_layers
self.output_hidden_size = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
self.tdnn_dim = list(tdnn_dim)
self.tdnn_kernel = list(tdnn_kernel)
self.tdnn_dilation = list(tdnn_dilation)
self.xvector_output_dim = xvector_output_dim
@property
def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul, self.conv_stride, 1) | 3,699 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py |
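As a quick sanity check, the ratio above can be computed by hand from the default `conv_stride`; this is a minimal sketch using only the standard library, and the 16 kHz sampling rate is an assumption about the input audio rather than part of the config.
```python
import functools
import operator

# Default strides of the feature encoder, as listed in the config docstring above.
conv_stride = (5, 2, 2, 2, 2, 2, 2)

# Same reduction as the `inputs_to_logits_ratio` property.
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> one encoder frame per 320 raw audio samples

# Assuming 16 kHz audio, each frame therefore covers 20 ms.
print(320 / 16_000 * 1000)  # 20.0
```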
class WavLMNoLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states | 3,700 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
class WavLMLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.activation(hidden_states)
return hidden_states | 3,701 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
class WavLMGroupNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states | 3,702 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
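To illustrate what these convolutional layers do to the tensor shape, here is a hedged sketch that instantiates the group-norm variant for layer 0 with a default `WavLMConfig`; it assumes the classes above are importable from `transformers.models.wavlm.modeling_wavlm`.
```python
import torch
from transformers import WavLMConfig
from transformers.models.wavlm.modeling_wavlm import WavLMGroupNormConvLayer

config = WavLMConfig()
layer = WavLMGroupNormConvLayer(config, layer_id=0)

# One second of 16 kHz audio as a single input channel: (batch, channels, samples).
waveform = torch.randn(1, 1, 16_000)
features = layer(waveform)

# (16000 - 10) // 5 + 1 = 3199 frames, each with conv_dim[0] = 512 channels.
print(features.shape)  # torch.Size([1, 512, 3199])
```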
class WavLMPositionalConvEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
groups=config.num_conv_pos_embedding_groups,
)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
if is_deepspeed_zero3_enabled():
import deepspeed | 3,703 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
self.conv = weight_norm(self.conv, name="weight", dim=2)
if hasattr(self.conv, "parametrizations"):
weight_g = self.conv.parametrizations.weight.original0
weight_v = self.conv.parametrizations.weight.original1
else:
weight_g = self.conv.weight_g
weight_v = self.conv.weight_v
deepspeed.zero.register_external_parameter(self, weight_v)
deepspeed.zero.register_external_parameter(self, weight_g)
else:
self.conv = weight_norm(self.conv, name="weight", dim=2)
self.padding = WavLMSamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2) | 3,703 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states | 3,703 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
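The shape contract of this module is that the output matches the input, so it can be added to the hidden states as a positional embedding. A small sketch under the default config, assuming the class is importable as above:
```python
import torch
from transformers import WavLMConfig
from transformers.models.wavlm.modeling_wavlm import WavLMPositionalConvEmbedding

config = WavLMConfig()  # hidden_size=768, num_conv_pos_embeddings=128
pos_conv = WavLMPositionalConvEmbedding(config)

hidden_states = torch.randn(2, 100, config.hidden_size)
pos_embeddings = pos_conv(hidden_states)

# Same (batch, seq_len, hidden_size) shape as the input.
print(pos_embeddings.shape)  # torch.Size([2, 100, 768])
```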
class WavLMSamePadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
super().__init__()
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def forward(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, :, : -self.num_pad_remove]
return hidden_states | 3,704 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
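The trimming exists because a stride-1 convolution with an even kernel size `k` and padding `k // 2` returns one frame too many. A minimal sketch of that arithmetic (the channel count of 4 is arbitrary):
```python
import torch
import torch.nn as nn

seq_len, k = 100, 128  # even kernel size, padding = k // 2
conv = nn.Conv1d(4, 4, kernel_size=k, padding=k // 2)

out = conv(torch.randn(1, 4, seq_len))
print(out.shape[-1])  # 101 -> one frame too many

trimmed = out[:, :, :-1]  # what WavLMSamePadLayer removes for even kernel sizes
print(trimmed.shape[-1])  # 100
```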
class WavLMFeatureEncoder(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [WavLMGroupNormConvLayer(config, layer_id=0)] + [
WavLMNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [WavLMLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True | 3,705 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
if self._requires_grad and self.gradient_checkpointing and self.training:
hidden_states = self._gradient_checkpointing_func(
conv_layer.__call__,
hidden_states,
)
else:
hidden_states = conv_layer(hidden_states)
return hidden_states | 3,705 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
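Putting the conv stack together, a hedged end-to-end sketch under the default config: the seven layers have a combined stride of 320, so one second of 16 kHz audio is reduced to 49 frames of 512 channels.
```python
import torch
from transformers import WavLMConfig
from transformers.models.wavlm.modeling_wavlm import WavLMFeatureEncoder

config = WavLMConfig()
encoder = WavLMFeatureEncoder(config).eval()

# Raw waveform of shape (batch, samples); the forward adds the channel dimension.
input_values = torch.randn(1, 16_000)
with torch.no_grad():
    features = encoder(input_values)

print(features.shape)  # torch.Size([1, 512, 49])
```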
class WavLMFeatureExtractor(WavLMFeatureEncoder):
def __init__(self, config):
super().__init__(config)
warnings.warn(
f"The class `{self.__class__.__name__}` has been depreciated "
"and will be removed in Transformers v5. "
f"Use `{self.__class__.__bases__[0].__name__}` instead.",
FutureWarning,
) | 3,706 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
class WavLMFeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_proj_dropout)
def forward(self, hidden_states):
# non-projected hidden states are needed for quantization
norm_hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(norm_hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states, norm_hidden_states | 3,707 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
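A short sketch of the projection step under the default config: the last conv dimension (512) is projected up to `hidden_size` (768), and the pre-projection, layer-normed features are returned alongside for the quantizer.
```python
import torch
from transformers import WavLMConfig
from transformers.models.wavlm.modeling_wavlm import WavLMFeatureProjection

config = WavLMConfig()
projection = WavLMFeatureProjection(config)

# (batch, frames, conv_dim[-1]), i.e. the transposed output of the feature encoder.
extract_features = torch.randn(1, 49, config.conv_dim[-1])
hidden_states, norm_features = projection(extract_features)

print(hidden_states.shape)  # torch.Size([1, 49, 768]) -> fed to the Transformer encoder
print(norm_features.shape)  # torch.Size([1, 49, 512]) -> kept for quantization
```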
class WavLMAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
num_buckets: int = 320,
max_distance: int = 800,
has_relative_position_bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.out_proj = nn.Linear(embed_dim, embed_dim) | 3,708 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/modeling_wavlm.py |
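A small sketch of the head bookkeeping set up in `__init__`, using the WavLM-base sizes (768 hidden units across 12 heads); it assumes the rest of the class is available in `modeling_wavlm`.
```python
from transformers.models.wavlm.modeling_wavlm import WavLMAttention

attention = WavLMAttention(
    embed_dim=768,
    num_heads=12,
    num_buckets=320,
    max_distance=800,
    has_relative_position_bias=True,
)

print(attention.head_dim)  # 64    (768 // 12)
print(attention.scaling)   # 0.125 (64 ** -0.5)
```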