potsawee committed
Commit 45bf5c5 · verified · 1 Parent(s): d7a2a0f

Upload modeling_mimi_clean.py with huggingface_hub

Files changed (1)
  1. modeling_mimi_clean.py +1784 -0
modeling_mimi_clean.py ADDED
@@ -0,0 +1,1784 @@
# coding=utf-8
# Copyright 2024 Kyutai, and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Mimi model - Clean original implementation."""

import math
from dataclasses import dataclass
from typing import Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.masking_utils import create_causal_mask
from transformers.modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available
from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.modeling_outputs import BaseModelOutputWithPast
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, auto_docstring, logging

from configuration_mimi import MimiConfig


if is_flash_attn_available():
    from transformers.modeling_flash_attention_utils import _flash_attention_forward


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring
class MimiOutput(ModelOutput):
    r"""
    audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
        Discrete code embeddings computed using `model.encode`.
    audio_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
        Decoded audio values, obtained using the decoder part of Mimi.
    encoder_past_key_values (`Cache`, *optional*):
        Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
        This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

        The model will output the same cache format that is fed as input.

        If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
        have their past key value states given to this model).
    decoder_past_key_values (`Cache`, *optional*):
        Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
        This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

        The model will output the same cache format that is fed as input.

        If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
        have their past key value states given to this model).
    """

    audio_codes: Optional[torch.LongTensor] = None
    audio_values: Optional[torch.FloatTensor] = None
    encoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None
    decoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None


class MimiConv1dPaddingCache:
    """
    Padding cache for MimiConv1d causal convolutions, to support streaming via cached padding.
    See: https://arxiv.org/pdf/2005.06720 & https://arxiv.org/pdf/2204.07064

    A padding cache is a list of cached partial hidden states for each convolution layer.
    Hidden states are cached from the previous call to the MimiConv1d forward pass, given the padding size.
    """

    def __init__(
        self,
        num_layers: int,
        per_layer_padding: list[int],
        per_layer_padding_mode: list[str],
        per_layer_in_channels: list[int],
    ):
        # ensure correct number of layers for each arg
        from_args_num_layers = {len(per_layer_padding), len(per_layer_padding_mode), len(per_layer_in_channels)}

        if len(from_args_num_layers) != 1 or from_args_num_layers.pop() != num_layers:
            raise ValueError(
                f"Expected `num_layers` ({num_layers}) values in `per_layer_padding`, `per_layer_padding_mode` and `per_layer_in_channels`"
            )
        elif not all(mode in ["constant", "replicate"] for mode in per_layer_padding_mode):
            raise NotImplementedError(
                "`padding_cache` is not supported for convolutions using other than `constant` or `replicate` padding mode"
            )

        self.per_layer_padding = per_layer_padding
        self.per_layer_padding_mode = per_layer_padding_mode
        self.per_layer_in_channels = per_layer_in_channels
        self.per_layer_is_init = [True] * num_layers

        self.padding_cache = [None] * num_layers

    def update(self, hidden_states: torch.Tensor, layer_idx: int):
        """
        Updates the padding cache with the new padding states for the layer `layer_idx` and returns the current cache.

        Parameters:
            hidden_states (`torch.Tensor`):
                The hidden states to be partially cached.
            layer_idx (`int`):
                The index of the layer to cache the states for.
        Returns:
            `torch.Tensor` or `None`, the current padding cache.
        """
        batch_size, dtype, device = hidden_states.shape[0], hidden_states.dtype, hidden_states.device
        padding = self.per_layer_padding[layer_idx]
        padding_mode = self.per_layer_padding_mode[layer_idx]
        in_channels = self.per_layer_in_channels[layer_idx]

        if self.padding_cache[layer_idx] is None:
            if padding_mode == "constant":
                current_cache = torch.zeros(
                    batch_size,
                    in_channels,
                    padding,
                    device=device,
                    dtype=dtype,
                )
            elif padding_mode == "replicate":
                current_cache = (
                    torch.ones(
                        batch_size,
                        in_channels,
                        padding,
                        device=device,
                        dtype=dtype,
                    )
                    * hidden_states[..., :1]
                )
        else:
            current_cache = self.padding_cache[layer_idx]

        # update the cache
        if padding > 0:
            padding_states = hidden_states[:, :, -padding:]
        else:
            padding_states = torch.empty(batch_size, in_channels, padding, dtype=dtype, device=device)
        self.padding_cache[layer_idx] = padding_states

        return current_cache
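

# A hedged streaming sketch for the cache above: with a single causal conv
# (kernel_size=4, stride=1 -> padding_total=3, "constant" mode), chunked calls
# sharing one cache should match a single full-sequence call, because each
# chunk is left-padded with the previous chunk's last 3 samples instead of
# zeros. `conv`, `cache`, `x` and the sizes below are illustrative only:
#
#   cache = MimiConv1dPaddingCache(
#       num_layers=1,
#       per_layer_padding=[3],
#       per_layer_padding_mode=["constant"],
#       per_layer_in_channels=[8],
#   )
#   conv = MimiConv1d(config, 8, 16, kernel_size=4)  # config.use_causal_conv=True
#   full = conv(x)  # x: (batch, 8, T)
#   chunked = torch.cat([conv(c, padding_cache=cache) for c in x.chunk(4, dim=-1)], dim=-1)
#   torch.testing.assert_close(full, chunked)  # equal up to numerical precision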


@dataclass
@auto_docstring
class MimiEncoderOutput(ModelOutput):
    r"""
    audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
        Discrete code embeddings computed using `model.encode`.
    encoder_past_key_values (`Cache`, *optional*):
        Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
        This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

        The model will output the same cache format that is fed as input.

        If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
        have their past key value states given to this model).
    padding_cache (`MimiConv1dPaddingCache`, *optional*):
        Cached partial hidden states of the causal convolution layers, used to speed up streaming encoding.
    """

    audio_codes: Optional[torch.LongTensor] = None
    encoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None
    padding_cache: Optional[MimiConv1dPaddingCache] = None


@dataclass
@auto_docstring
class MimiDecoderOutput(ModelOutput):
    r"""
    audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
        Decoded audio values, obtained using the decoder part of Mimi.
    decoder_past_key_values (`Cache`, *optional*):
        Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
        This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

        The model will output the same cache format that is fed as input.

        If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
        have their past key value states given to this model).
    """

    audio_values: Optional[torch.FloatTensor] = None
    decoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None


class MimiConv1d(nn.Module):
    """Conv1d with asymmetric or causal padding and normalization."""

    def __init__(
        self,
        config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        dilation: int = 1,
        groups: int = 1,
        pad_mode: Optional[str] = None,
        bias: bool = True,
        layer_idx: Optional[int] = None,
    ):
        super().__init__()
        self.causal = config.use_causal_conv
        self.pad_mode = config.pad_mode if pad_mode is None else pad_mode
        self.layer_idx = layer_idx
        self.in_channels = in_channels

        # warn user on unusual setup between dilation and stride
        if stride > 1 and dilation > 1:
            logger.warning(
                "MimiConv1d has been initialized with stride > 1 and dilation > 1"
                f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})."
            )

        self.conv = nn.Conv1d(
            in_channels, out_channels, kernel_size, stride, dilation=dilation, groups=groups, bias=bias
        )

        kernel_size = self.conv.kernel_size[0]
        stride = torch.tensor(self.conv.stride[0], dtype=torch.int64)
        dilation = self.conv.dilation[0]

        # Effective kernel size with dilations.
        kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64)

        self.register_buffer("stride", stride, persistent=False)
        self.register_buffer("kernel_size", kernel_size, persistent=False)
        self.register_buffer("padding_total", kernel_size - stride, persistent=False)

        # Asymmetric padding required for odd strides
        self.padding_right = self.padding_total // 2
        self.padding_left = self.padding_total - self.padding_right

    def apply_weight_norm(self):
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        weight_norm(self.conv)

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.conv)

    # Copied from transformers.models.encodec.modeling_encodec.EncodecConv1d._get_extra_padding_for_conv1d
    def _get_extra_padding_for_conv1d(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """See `pad_for_conv1d`."""
        length = hidden_states.shape[-1]
        n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1
        n_frames = torch.ceil(n_frames).to(torch.int64) - 1
        ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total

        return ideal_length - length

    @staticmethod
    # Copied from transformers.models.encodec.modeling_encodec.EncodecConv1d._pad1d
    def _pad1d(hidden_states: torch.Tensor, paddings: tuple[int, int], mode: str = "zero", value: float = 0.0):
        """Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input.
        If this is the case, we insert extra 0 padding to the right before the reflection happens.
        """
        length = hidden_states.shape[-1]
        padding_left, padding_right = paddings
        if not mode == "reflect":
            return nn.functional.pad(hidden_states, paddings, mode, value)

        max_pad = max(padding_left, padding_right)
        extra_pad = 0
        if length <= max_pad:
            extra_pad = max_pad - length + 1
            hidden_states = nn.functional.pad(hidden_states, (0, extra_pad))
        padded = nn.functional.pad(hidden_states, paddings, mode, value)
        end = padded.shape[-1] - extra_pad
        return padded[..., :end]

    def _get_output_length(self, input_length: torch.LongTensor) -> torch.LongTensor:
        """
        Return the length of the output of the MimiConv1d.
        """
        # padding size
        n_frames = (input_length - self.kernel_size + self.padding_total) / self.stride + 1
        n_frames = torch.ceil(n_frames).to(torch.int64) - 1
        ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total
        extra_padding = ideal_length - input_length

        if self.causal:
            padding_left = self.padding_total
            padding_right = extra_padding
        else:
            padding_left = self.padding_left
            padding_right = self.padding_right + extra_padding

        # padding
        input_length = input_length + padding_left + padding_right

        # conv
        output_length = (
            input_length + 2 * self.conv.padding[0] - self.conv.dilation[0] * (self.conv.kernel_size[0] - 1) - 1
        ) // self.conv.stride[0] + 1
        return output_length

    def forward(self, hidden_states, padding_cache=None):
        extra_padding = self._get_extra_padding_for_conv1d(hidden_states)

        if not self.causal and padding_cache is not None:
            raise ValueError("`padding_cache` is not supported for non-causal convolutions.")

        if self.causal and padding_cache is not None:
            layer_padding_cache = padding_cache.update(hidden_states, self.layer_idx)
            hidden_states = torch.cat([layer_padding_cache, hidden_states], dim=2)

        elif self.causal:
            # Left padding for causal
            hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode)

        else:
            hidden_states = self._pad1d(
                hidden_states, (self.padding_left, self.padding_right + extra_padding), mode=self.pad_mode
            )

        hidden_states = self.conv(hidden_states)
        return hidden_states
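

# Worked example of the padding arithmetic above, with made-up sizes:
# kernel_size=8, stride=4 gives padding_total = 8 - 4 = 4. For an input of
# length 50:
#   n_frames      = ceil((50 - 8 + 4) / 4 + 1) - 1 = ceil(12.5) - 1 = 12
#   ideal_length  = 12 * 4 + 8 - 4 = 52
#   extra_padding = 52 - 50 = 2
# A causal conv then left-pads by padding_total (4) and right-pads by
# extra_padding (2), so the conv sees 56 samples and emits
# (56 - 8) // 4 + 1 = 13 frames, i.e. no trailing samples are dropped.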


class MimiConvTranspose1d(nn.Module):
    """ConvTranspose1d with asymmetric or causal padding and normalization."""

    def __init__(
        self,
        config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias=True,
    ):
        super().__init__()
        self.causal = config.use_causal_conv
        self.trim_right_ratio = config.trim_right_ratio
        self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride, groups=groups, bias=bias)

        if not (self.causal or self.trim_right_ratio == 1.0):
            raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions")

        kernel_size = self.conv.kernel_size[0]
        stride = self.conv.stride[0]
        padding_total = kernel_size - stride

        # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
        # removed at the very end, when keeping only the right length for the output,
        # as removing it here would require also passing the length at the matching layer
        # in the encoder.
        if self.causal:
            # Trim the padding on the right according to the specified ratio
            # if trim_right_ratio = 1.0, trim everything from right
            self.padding_right = math.ceil(padding_total * self.trim_right_ratio)
        else:
            # Asymmetric padding required for odd strides
            self.padding_right = padding_total // 2

        self.padding_left = padding_total - self.padding_right

    def apply_weight_norm(self):
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        weight_norm(self.conv)

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.conv)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)

        # unpad
        end = hidden_states.shape[-1] - self.padding_right
        hidden_states = hidden_states[..., self.padding_left : end]
        return hidden_states


class MimiResnetBlock(nn.Module):
    """
    Residual block from SEANet model as used by Mimi.
    """

    def __init__(self, config: MimiConfig, dim: int, dilations: list[int]):
        super().__init__()
        kernel_sizes = (config.residual_kernel_size, 1)
        if len(kernel_sizes) != len(dilations):
            raise ValueError("Number of kernel sizes should match number of dilations")

        hidden = dim // config.compress
        block = []
        for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
            in_chs = dim if i == 0 else hidden
            out_chs = dim if i == len(kernel_sizes) - 1 else hidden
            block += [nn.ELU()]
            block += [MimiConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)]
        self.block = nn.ModuleList(block)

        if config.use_conv_shortcut:
            self.shortcut = MimiConv1d(config, dim, dim, kernel_size=1)
        else:
            self.shortcut = nn.Identity()

    def forward(self, hidden_states, padding_cache=None):
        residual = hidden_states

        for layer in self.block:
            if isinstance(layer, MimiConv1d):
                hidden_states = layer(hidden_states, padding_cache=padding_cache)
            else:
                hidden_states = layer(hidden_states)

        if isinstance(self.shortcut, MimiConv1d):
            residual = self.shortcut(residual, padding_cache=padding_cache)
        else:
            residual = self.shortcut(residual)

        return residual + hidden_states


class MimiEncoder(nn.Module):
    """SEANet encoder as used by Mimi."""

    def __init__(self, config: MimiConfig):
        super().__init__()
        model = [MimiConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)]
        scaling = 1

        # keep track of MimiConv1d submodule layer names for easy encoded length computation
        mimiconv1d_layer_names = ["layers.0"]

        # Downsample to raw audio scale
        for ratio in reversed(config.upsampling_ratios):
            current_scale = scaling * config.num_filters
            # Add residual layers
            for j in range(config.num_residual_layers):
                mimiconv1d_layer_names.extend([f"layers.{len(model)}.block.1", f"layers.{len(model)}.block.3"])
                model += [MimiResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])]
            # Add downsampling layers
            model += [nn.ELU()]
            mimiconv1d_layer_names.append(f"layers.{len(model)}")
            model += [MimiConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)]
            scaling *= 2

        model += [nn.ELU()]
        mimiconv1d_layer_names.append(f"layers.{len(model)}")
        model += [MimiConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)]

        self.layers = nn.ModuleList(model)
        self._mimiconv1d_layer_names = mimiconv1d_layer_names

        # initialize layer_idx for MimiConv1d submodules, necessary for padding_cache
        for layer_idx, layername in enumerate(self._mimiconv1d_layer_names):
            conv_layer = self.get_submodule(layername)
            setattr(conv_layer, "layer_idx", layer_idx)

    def forward(self, hidden_states, padding_cache=None):
        for layer in self.layers:
            if isinstance(layer, (MimiConv1d, MimiResnetBlock)):
                hidden_states = layer(hidden_states, padding_cache=padding_cache)
            else:
                hidden_states = layer(hidden_states)
        return hidden_states
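

# Downsampling arithmetic for the encoder above: each strided MimiConv1d shrinks
# the time axis by `ratio`, so the total hop is the product of
# config.upsampling_ratios. Assuming the released Mimi config values
# (upsampling_ratios=[8, 6, 5, 4], sampling_rate=24000), the hop is
# 8 * 6 * 5 * 4 = 960 samples, i.e. this SEANet stack emits 24000 / 960 = 25
# latent frames per second before any further downsampling toward config.frame_rate.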


class MimiLayerScale(nn.Module):
    """Layer scale from [Touvron et al 2021] (https://huggingface.co/papers/2103.17239).
    Rescales the residual branch output by a learnt per-channel (diagonal) scale, initialized close to 0.
    """

    def __init__(self, config):
        super().__init__()
        channels = config.hidden_size
        initial_scale = config.layer_scale_initial_scale
        self.scale = nn.Parameter(torch.full((channels,), initial_scale, requires_grad=True))

    def forward(self, x: torch.Tensor):
        return self.scale * x


# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Mimi
class MimiRotaryEmbedding(nn.Module):
    def __init__(self, config: MimiConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
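

# Shape sketch for the two helpers above (illustrative sizes only):
#   q, k: (batch=2, num_heads=8, seq_len=5, head_dim=64)
#   position_ids = torch.arange(5)[None]                  # (1, 5)
#   cos, sin = rotary_emb(q, position_ids)                # each (1, 5, 64)
#   q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)   # shapes unchanged
# With unsqueeze_dim=1, cos/sin broadcast over the head dimension. Each
# (x1, x2) pair is rotated by an angle proportional to its position, so the
# q.k dot products end up depending only on relative positions.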


class MimiMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)

    # Copied from transformers.models.clip.modeling_clip.CLIPMLP.forward
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
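

# Example of the grouped-query expansion above: with num_attention_heads=8 and
# num_key_value_heads=2, n_rep = 8 // 2 = 4, so
#   kv = torch.randn(1, 2, 10, 64)   # (batch, kv_heads, seq_len, head_dim)
#   repeat_kv(kv, 4).shape           # -> torch.Size([1, 8, 10, 64])
# Each key/value head is duplicated contiguously to serve 4 query heads,
# matching torch.repeat_interleave(kv, repeats=4, dim=1).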


# copied from transformers.models.gemma.modeling_gemma.GemmaAttention with Gemma->Mimi
# no longer copied after attention refactors
class MimiAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: MimiConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = config.head_dim
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True
        self.scaling = 1 / math.sqrt(config.head_dim)

        if self.hidden_size % self.num_heads != 0:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
        self.rotary_emb = MimiRotaryEmbedding(config)
        self.sliding_window = config.sliding_window  # Ignore copy

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling

        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()

        attn_output = attn_output.view(bsz, q_len, -1)
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


# NO LONGER EXIST Copied from transformers.models.gemma.modeling_gemma.GemmaFlashAttention2 with Gemma->Mimi
# TODO cyril: modular
class MimiFlashAttention2(MimiAttention):
    """
    Mimi flash attention module. This module inherits from `MimiAttention` as the weights of the module stay
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        if isinstance(past_key_value, StaticCache):
            raise ValueError(
                "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
                "make sure to use `sdpa` in the meantime, and open an issue at https://github.com/huggingface/transformers"
            )

        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast to float32. Hence, we need
        # to cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (MimiRMSNorm handles it correctly)

        input_dtype = query_states.dtype
        device_type = query_states.device.type if query_states.device.type != "mps" else "cpu"
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = (
                    torch.get_autocast_dtype(device_type)
                    if hasattr(torch, "get_autocast_dtype")
                    else torch.get_autocast_gpu_dtype()
                )
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to be silently cast to float32; this might be related to"
                f" the fact that you have upcasted embedding or layer norm layers in float32. We will cast the input"
                f" back to {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            position_ids=position_ids,
            dropout=dropout_rate,
            sliding_window=getattr(self, "sliding_window", None),
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


# NO LONGER EXIST Copied from transformers.models.gemma.modeling_gemma.GemmaSdpaAttention with Gemma->Mimi
# TODO cyril: modular
class MimiSdpaAttention(MimiAttention):
    """
    Mimi attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `MimiAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
    the SDPA API.
    """

    # Adapted from MimiAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "MimiModel is using MimiSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        causal_mask = attention_mask
        if attention_mask is not None:
            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and causal_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
        # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
        is_causal = True if causal_mask is None and q_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=causal_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, -1)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


MIMI_ATTENTION_CLASSES = {
    "eager": MimiAttention,
    "flash_attention_2": MimiFlashAttention2,
    "sdpa": MimiSdpaAttention,
}
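

# The mapping above is keyed by config._attn_implementation, which is what
# `attn_implementation` resolves to at load time; e.g. (a sketch, assuming the
# public "kyutai/mimi" checkpoint and the surrounding MimiModel wrapper):
#   model = MimiModel.from_pretrained("kyutai/mimi", attn_implementation="sdpa")
# routes every transformer layer through MimiSdpaAttention, while "eager" and
# "flash_attention_2" select the other two classes.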


class MimiTransformerLayer(GradientCheckpointingLayer):
    def __init__(self, config: MimiConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = MIMI_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)

        self.mlp = MimiMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
        self.self_attn_layer_scale = MimiLayerScale(config)
        self.mlp_layer_scale = MimiLayerScale(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
                into the model
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + self.self_attn_layer_scale(hidden_states)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.mlp_layer_scale(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


class MimiTransformerModel(nn.Module):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MimiTransformerLayer`].

    Args:
        config: MimiConfig
    """

    def __init__(self, config: MimiConfig):
        super().__init__()

        self.layers = nn.ModuleList(
            [MimiTransformerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._attn_implementation = config._attn_implementation

        self.gradient_checkpointing = False
        self.config = config

    def forward(
        self,
        hidden_states: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[tuple, BaseModelOutputWithPast]:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Embedded representation that will be contextualized by the model
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
                `past_key_values`).

                If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
                and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more
                information on the default strategy.
            position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
                config.n_positions - 1]`.

                [What are position IDs?](../glossary#position-ids)
            past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
                Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
                blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
                returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

                Two formats are allowed:
                - a [`~cache_utils.Cache`] instance;
                - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
                cache format.

                The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
                legacy cache format will be returned.

                If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
                have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
                of shape `(batch_size, sequence_length)`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
                `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
                tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
                more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if use_cache and not isinstance(past_key_values, Cache):
            if past_key_values is None:
                past_key_values = DynamicCache()
            else:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                logger.warning_once(
                    "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                    "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                    "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
                )

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + hidden_states.shape[1], device=hidden_states.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=hidden_states,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
            )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
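

# Streaming sketch for the transformer above: with use_cache=True the layers
# append their key/value states to a DynamicCache, so later calls may pass only
# the newly produced frames (illustrative shapes; `h` is an embedded sequence):
#   out = transformer(hidden_states=h[:, :t], use_cache=True, return_dict=True)
#   out = transformer(
#       hidden_states=h[:, t : t + 1],          # one new frame
#       past_key_values=out.past_key_values,    # reuse cached K/V
#       use_cache=True,
#       return_dict=True,
#   )
# cache_position and position_ids are then derived from the cache length.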
1170
+ class MimiDecoder(nn.Module):
+     """SEANet decoder as used by Mimi."""
+
+     def __init__(self, config: MimiConfig):
+         super().__init__()
+         scaling = int(2 ** len(config.upsampling_ratios))
+         model = [MimiConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)]
+
+         # Upsample to raw audio scale
+         for ratio in config.upsampling_ratios:
+             current_scale = scaling * config.num_filters
+             # Add upsampling layers
+             model += [nn.ELU()]
+             model += [
+                 MimiConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio)
+             ]
+             # Add residual layers
+             for j in range(config.num_residual_layers):
+                 model += [MimiResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))]
+             scaling //= 2
+
+         # Add final layers
+         model += [nn.ELU()]
+         model += [MimiConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)]
+         self.layers = nn.ModuleList(model)
+
+     # Copied from transformers.models.encodec.modeling_encodec.EncodecDecoder.forward
+     def forward(self, hidden_states):
+         for layer in self.layers:
+             hidden_states = layer(hidden_states)
+         return hidden_states
+
+
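A quick trace of the channel widths the constructor above produces, assuming the released kyutai/mimi defaults (`upsampling_ratios=[8, 6, 5, 4]`, `num_filters=64`) — illustrative arithmetic, not an API call:

```python
upsampling_ratios, num_filters = [8, 6, 5, 4], 64
scaling = 2 ** len(upsampling_ratios)       # 16
widths = [scaling * num_filters]            # 1024 channels after the input conv
for _ in upsampling_ratios:
    widths.append(widths[-1] // 2)          # each transposed conv halves the channels
print(widths)                               # [1024, 512, 256, 128, 64]
```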
1203
+ class MimiEuclideanCodebook(nn.Module):
+     """Codebook with Euclidean distance."""
+
+     def __init__(self, config: MimiConfig, epsilon: float = 1e-5):
+         super().__init__()
+         embed = torch.zeros(config.codebook_size, config.codebook_dim)
+
+         self.codebook_size = config.codebook_size
+
+         self.register_buffer("initialized", torch.tensor([True], dtype=torch.float32))
+         self.register_buffer("cluster_usage", torch.ones(config.codebook_size))
+         self.register_buffer("embed_sum", embed)
+         self._embed = None
+         self.epsilon = epsilon
+
+     @property
+     def embed(self) -> torch.Tensor:
+         if self._embed is None:
+             self._embed = self.embed_sum / self.cluster_usage.clamp(min=self.epsilon)[:, None]
+         return self._embed
+
+     def quantize(self, hidden_states):
+         # Projects each vector in `hidden_states` onto the nearest centroid and returns its index.
+         # `hidden_states` should be `[N, D]`, with `N` the number of input vectors and `D` their dimension.
+         dists = torch.cdist(hidden_states[None].float(), self.embed[None].float(), p=2)[0]
+         embed_ind = dists.argmin(dim=-1)
+         return embed_ind
+
+     # Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.encode
+     def encode(self, hidden_states):
+         shape = hidden_states.shape
+         # pre-process
+         hidden_states = hidden_states.reshape((-1, shape[-1]))
+         # quantize
+         embed_ind = self.quantize(hidden_states)
+         # post-process
+         embed_ind = embed_ind.view(*shape[:-1])
+         return embed_ind
+
+     # Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.decode
+     def decode(self, embed_ind):
+         quantize = nn.functional.embedding(embed_ind, self.embed)
+         return quantize
+
+
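The `quantize`/`decode` pair above is a plain nearest-centroid lookup followed by an embedding read-back. A standalone sketch with toy shapes (4 vectors, 8 centroids, dimension 16):

```python
import torch

embed = torch.randn(8, 16)                          # (codebook_size, codebook_dim)
hidden_states = torch.randn(4, 16)                  # (N, D) input vectors
dists = torch.cdist(hidden_states[None], embed[None], p=2)[0]  # (N, codebook_size)
embed_ind = dists.argmin(dim=-1)                    # index of the closest centroid
quantized = torch.nn.functional.embedding(embed_ind, embed)   # (N, D) read-back
```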
1248
+ # Copied from transformers.models.encodec.modeling_encodec.EncodecVectorQuantization with Encodec->Mimi
+ class MimiVectorQuantization(nn.Module):
+     """
+     Vector quantization implementation. Currently supports only Euclidean distance.
+     """
+
+     def __init__(self, config: MimiConfig):
+         super().__init__()
+         self.codebook = MimiEuclideanCodebook(config)
+
+     def encode(self, hidden_states):
+         hidden_states = hidden_states.permute(0, 2, 1)
+         embed_in = self.codebook.encode(hidden_states)
+         return embed_in
+
+     def decode(self, embed_ind):
+         quantize = self.codebook.decode(embed_ind)
+         quantize = quantize.permute(0, 2, 1)
+         return quantize
+
+
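The permutes exist because the convolutional stack is channels-first while the codebook expects vectors last; a two-line illustration:

```python
import torch

features = torch.randn(2, 16, 50)        # (B, D, T), as produced by the conv encoder
vectors = features.permute(0, 2, 1)      # (B, T, D), as expected by the codebook
```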
1269
+ class MimiResidualVectorQuantizer(nn.Module):
+     """Residual Vector Quantizer."""
+
+     def __init__(self, config: MimiConfig, num_quantizers: Optional[int] = None):
+         super().__init__()
+         self.codebook_size = config.codebook_size
+         self.frame_rate = config.frame_rate
+         self.num_quantizers = num_quantizers if num_quantizers is not None else config.num_quantizers
+         self.layers = nn.ModuleList([MimiVectorQuantization(config) for _ in range(self.num_quantizers)])
+
+         self.input_proj = None
+         self.output_proj = None
+         if config.vector_quantization_hidden_dimension != config.hidden_size:
+             self.input_proj = torch.nn.Conv1d(
+                 config.hidden_size, config.vector_quantization_hidden_dimension, 1, bias=False
+             )
+             self.output_proj = torch.nn.Conv1d(
+                 config.vector_quantization_hidden_dimension, config.hidden_size, 1, bias=False
+             )
+
+     def encode(self, embeddings: torch.Tensor, num_quantizers: Optional[int] = None) -> torch.Tensor:
+         """
+         Encode the given input tensor using the given number of quantizers / codebooks and return the
+         indices produced by each quantizer.
+         """
+         if self.input_proj is not None:
+             embeddings = self.input_proj(embeddings)
+
+         num_quantizers = num_quantizers if num_quantizers is not None else self.num_quantizers
+
+         residual = embeddings
+         all_indices = []
+         for layer in self.layers[:num_quantizers]:
+             indices = layer.encode(residual)
+             quantized = layer.decode(indices)
+             residual = residual - quantized
+             all_indices.append(indices)
+         out_indices = torch.stack(all_indices)
+         return out_indices
+
+     def decode(self, codes: torch.Tensor) -> torch.Tensor:
+         """Decode the given codes of shape [B, K, T] to the quantized representation."""
+         quantized_out = torch.tensor(0.0, device=codes.device)
+         codes = codes.transpose(0, 1)
+         for i, indices in enumerate(codes):
+             layer = self.layers[i]
+             quantized = layer.decode(indices)
+             quantized_out = quantized_out + quantized
+
+         if self.output_proj is not None:
+             quantized_out = self.output_proj(quantized_out)
+         return quantized_out
+
+
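The encode loop above is classic residual quantization: each stage quantizes whatever the previous stages failed to explain. A self-contained toy version (the `quantize` helper is a hypothetical stand-in for the codebook layers):

```python
import torch

def quantize(x, codebook):
    # nearest-neighbour lookup, as in MimiEuclideanCodebook.quantize
    idx = torch.cdist(x, codebook).argmin(dim=-1)
    return codebook[idx]

codebooks = [torch.randn(8, 16) for _ in range(4)]   # 4 quantizer stages
x = torch.randn(5, 16)
residual, reconstruction = x, torch.zeros_like(x)
for cb in codebooks:
    q = quantize(residual, cb)
    reconstruction = reconstruction + q
    residual = residual - q                          # error handed to the next stage
print((x - reconstruction).norm())                   # reconstruction error after 4 stages
```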
1323
+ class MimiSplitResidualVectorQuantizer(nn.Module):
+     """Split Residual Vector Quantizer."""
+
+     def __init__(self, config: MimiConfig):
+         super().__init__()
+         self.codebook_size = config.codebook_size
+         self.frame_rate = config.frame_rate
+         self.max_num_quantizers = config.num_quantizers
+
+         self.num_semantic_quantizers = config.num_semantic_quantizers
+         self.num_acoustic_quantizers = config.num_quantizers - config.num_semantic_quantizers
+
+         self.semantic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_semantic_quantizers)
+         self.acoustic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_acoustic_quantizers)
+
+     def encode(self, embeddings: torch.Tensor, num_quantizers: Optional[int] = None) -> torch.Tensor:
+         """
+         Encode the given input tensor using the given number of quantizers / codebooks and return the
+         indices produced by each quantizer.
+         """
+
+         num_quantizers = self.max_num_quantizers if num_quantizers is None else num_quantizers
+
+         if num_quantizers > self.max_num_quantizers:
+             raise ValueError(
+                 f"The number of quantizers (i.e. codebooks) asked for should be lower than the total number of quantizers {self.max_num_quantizers}, but is currently {num_quantizers}."
+             )
+
+         if num_quantizers < self.num_semantic_quantizers:
+             raise ValueError(
+                 f"The number of quantizers (i.e. codebooks) asked for should be higher than the number of semantic quantizers {self.num_semantic_quantizers}, but is currently {num_quantizers}."
+             )
+
+         # codes is [K, B, T], with T frames and K codebooks.
+         codes = self.semantic_residual_vector_quantizer.encode(embeddings)
+
+         if num_quantizers > self.num_semantic_quantizers:
+             acoustic_codes = self.acoustic_residual_vector_quantizer.encode(
+                 embeddings, num_quantizers=num_quantizers - self.num_semantic_quantizers
+             )
+             codes = torch.cat([codes, acoustic_codes], dim=0)
+
+         return codes
+
+     def decode(self, codes: torch.Tensor) -> torch.Tensor:
+         """Decode the given codes to the quantized representation."""
+
+         # The first num_semantic_quantizers codebooks are decoded using the semantic RVQ
+         quantized_out = self.semantic_residual_vector_quantizer.decode(codes[:, : self.num_semantic_quantizers])
+
+         # The rest of the codebooks are decoded using the acoustic RVQ
+         if codes.shape[1] > self.num_semantic_quantizers:
+             quantized_out += self.acoustic_residual_vector_quantizer.decode(codes[:, self.num_semantic_quantizers :])
+         return quantized_out
+
+
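Layout reminder for the split quantizer, assuming the released kyutai/mimi defaults (`num_quantizers=32`, `num_semantic_quantizers=1`, `codebook_size=2048`): `encode` stacks per-codebook indices as `[K, B, T]`, while `decode` expects `[B, K, T]` and routes codebook 0 to the semantic RVQ and codebooks 1: to the acoustic RVQ:

```python
import torch

codes = torch.randint(0, 2048, (2, 32, 50))     # [B, K, T]: batch 2, 32 codebooks, 50 frames
semantic_codes = codes[:, :1]                   # decoded by the semantic RVQ
acoustic_codes = codes[:, 1:]                   # decoded by the acoustic RVQ
```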
1379
+ @auto_docstring
+ class MimiPreTrainedModel(PreTrainedModel):
+     config_class = MimiConfig
+     base_model_prefix = "mimi"
+     main_input_name = "input_values"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["MimiDecoderLayer"]
+     _skip_keys_device_placement = "past_key_values"
+     _supports_flash_attn_2 = True
+     _supports_sdpa = True
+     _supports_cache_class = True
+     _supports_static_cache = True
+
+     def _init_weights(self, module):
+         """Initialize the weights"""
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.LayerNorm):
+             module.bias.data.zero_()
+             module.weight.data.fill_(1.0)
+         elif isinstance(module, (nn.Conv1d, nn.ConvTranspose1d)):
+             nn.init.kaiming_normal_(module.weight)
+             if module.bias is not None:
+                 k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
+                 nn.init.uniform_(module.bias, a=-k, b=k)
+         elif isinstance(module, MimiLayerScale):
+             module.scale.data.fill_(self.config.layer_scale_initial_scale)
+
+
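For the conv branch, the bias bound above matches PyTorch's own default: `U(-k, k)` with `k = 1 / sqrt(fan_in)`, where `fan_in = in_channels / groups * kernel_size`. For instance:

```python
import math

in_channels, groups, kernel_size = 512, 1, 7
k = math.sqrt(groups / (in_channels * kernel_size))   # == 1 / sqrt(fan_in)
```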
1410
+ @auto_docstring(
+     custom_intro="""
+     The Mimi neural audio codec model.
+     """
+ )
+ class MimiModel(MimiPreTrainedModel):
+     def __init__(self, config: MimiConfig):
+         super().__init__(config)
+         self.config = config
+
+         self.encoder = MimiEncoder(config)
+         self.encoder_transformer = MimiTransformerModel(config)
+
+         self.downsample = None
+         self.upsample = None
+         if config.frame_rate != config.encodec_frame_rate:
+             self.downsample = MimiConv1d(
+                 config,
+                 config.hidden_size,
+                 config.hidden_size,
+                 kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate),
+                 stride=2,
+                 bias=False,
+                 pad_mode="replicate",
+                 layer_idx=len(self.encoder._mimiconv1d_layer_names),
+             )
+
+             self.upsample = MimiConvTranspose1d(
+                 config,
+                 config.hidden_size,
+                 config.hidden_size,
+                 kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate),
+                 stride=2,
+                 bias=False,
+                 groups=config.upsample_groups,
+             )
+
+         self.decoder_transformer = MimiTransformerModel(config)
+         self.decoder = MimiDecoder(config)
+
+         self.quantizer = MimiSplitResidualVectorQuantizer(config)
+
+         self.bits_per_codebook = int(math.log2(self.config.codebook_size))
+         if 2**self.bits_per_codebook != self.config.codebook_size:
+             raise ValueError("The codebook_size must be a power of 2.")
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_encoder(self):
+         return self.encoder
+
+     def get_decoder(self):
+         return self.decoder
+
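The downsample/upsample pair exists to bridge two frame rates. Assuming the released kyutai/mimi defaults (`sampling_rate=24000`, `encodec_frame_rate=25.0`, `frame_rate=12.5`), the SEANet encoder emits 25 frames per second and a stride-2 conv halves that to the 12.5 Hz the quantizer runs at:

```python
sampling_rate, encodec_frame_rate, frame_rate = 24_000, 25.0, 12.5

hop = sampling_rate / encodec_frame_rate                 # 960 samples per encoder frame
kernel_size = 2 * int(encodec_frame_rate / frame_rate)   # 4, paired with stride=2
```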
1465
+     def _encode_frame(
+         self,
+         input_values: torch.Tensor,
+         num_quantizers: int,
+         padding_mask: torch.Tensor,
+         past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
+         padding_cache: Optional[MimiConv1dPaddingCache] = None,
+         return_dict: Optional[bool] = None,
+     ) -> tuple[torch.Tensor, Optional[Union[Cache, list[torch.FloatTensor]]], Optional[MimiConv1dPaddingCache]]:
+         """
+         Encodes the given input using the underlying VQVAE.
+         """
+
+         # TODO: @eustlb, let's make the encoder support padding_mask so that batched inputs are supported.
+         embeddings = self.encoder(input_values, padding_cache=padding_cache)
+         # input_values.shape = (1, 1, 24000 * T)
+         # embeddings.shape = (1, 512, 25 * T)
+
+         # TODO: @eustlb, convert the padding mask to attention mask.
+         encoder_outputs = self.encoder_transformer(
+             embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict
+         )
+         # encoder_outputs.last_hidden_state.shape = (1, 25 * T, 512)
+         if return_dict:
+             past_key_values = encoder_outputs.get("past_key_values")
+         elif len(encoder_outputs) > 1:
+             past_key_values = encoder_outputs[1]
+         embeddings = encoder_outputs[0].transpose(1, 2)  # (1, 512, 25 * T)
+         embeddings = self.downsample(embeddings, padding_cache=padding_cache)
+         # embeddings.shape = (1, 512, 12.5 * T)
+
+         codes = self.quantizer.encode(embeddings, num_quantizers)
+         codes = codes.transpose(0, 1)
+         # codes.shape = (1, 32, 12.5 * T)
+
+         return codes, past_key_values, padding_cache
+
+     def get_encoded_length(self, input_length: torch.LongTensor) -> torch.LongTensor:
+         """
+         Return the number of frames of the encoded audio waveform.
+         """
+         output_length = input_length
+
+         # encoder
+         for layer_name in self.encoder._mimiconv1d_layer_names:
+             output_length = self.encoder.get_submodule(layer_name)._get_output_length(output_length)
+
+         # downsample
+         output_length = self.downsample._get_output_length(output_length)
+
+         return output_length
+
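Each `_get_output_length` call applies the usual Conv1d length arithmetic; a generic sketch of that formula (the exact padding bookkeeping lives in `MimiConv1d`, defined earlier in this file, and may differ in detail):

```python
def conv_output_length(length: int, kernel_size: int, stride: int, padding_total: int = 0) -> int:
    # standard Conv1d output-length formula
    return (length + padding_total - kernel_size) // stride + 1
```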
1518
+     def get_audio_codes_mask(self, padding_mask: torch.Tensor, padding_side: str = "right"):
+         """
+         Get the mask for the audio codes from the original padding mask.
+         """
+         encoded_lengths = self.get_encoded_length(padding_mask.sum(dim=-1))
+
+         audio_codes_mask = torch.arange(encoded_lengths.max(), device=encoded_lengths.device).expand(
+             len(encoded_lengths), -1
+         )
+         audio_codes_mask = audio_codes_mask < encoded_lengths.unsqueeze(1)
+         audio_codes_mask = audio_codes_mask.to(padding_mask.device)
+
+         if padding_side == "right":
+             return audio_codes_mask
+         else:
+             return audio_codes_mask.flip(dims=[-1])
+
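The arange-versus-lengths comparison used above is a standard trick for building length masks; standalone, row `i` is `True` for the first `lengths[i]` positions:

```python
import torch

lengths = torch.tensor([3, 5])
mask = torch.arange(lengths.max()).expand(len(lengths), -1) < lengths.unsqueeze(1)
# tensor([[ True,  True,  True, False, False],
#         [ True,  True,  True,  True,  True]])
```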
1535
+     def encode(
+         self,
+         input_values: torch.Tensor,
+         padding_mask: Optional[torch.Tensor] = None,
+         num_quantizers: Optional[int] = None,
+         encoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
+         padding_cache: Optional[MimiConv1dPaddingCache] = None,
+         use_streaming: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[tuple[torch.Tensor, Optional[Union[Cache, list[torch.FloatTensor]]], Optional[MimiConv1dPaddingCache]], MimiEncoderOutput]:
+         """
+         Encodes the input audio waveform into discrete codes.
+
+         Args:
+             input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
+                 Float values of the input audio waveform.
+             padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
+                 Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
+                 for *masked*.
+             num_quantizers (`int`, *optional*):
+                 Number of quantizers (i.e. codebooks) to use. By default, all quantizers are used.
+             encoder_past_key_values (`Cache`, *optional*):
+                 Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
+                 This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+                 The model will output the same cache format as the one fed as input.
+
+                 If `past_key_values` is used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
+                 have their past key value states given to this model).
+             return_dict (`bool`, *optional*):
+                 Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+         Returns:
+             `codebook` of shape `[batch_size, num_codebooks, frames]`, the discrete encoded codes for the input audio waveform.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.return_dict
+         use_streaming = use_streaming if use_streaming is not None else self.config.use_streaming
+
+         num_quantizers = self.config.num_quantizers if num_quantizers is None else num_quantizers
+
+         if num_quantizers > self.config.num_quantizers:
+             raise ValueError(
+                 f"The number of quantizers (i.e. codebooks) asked for should be lower than the total number of quantizers {self.config.num_quantizers}, but is currently {num_quantizers}."
+             )
+
+         _, channels, input_length = input_values.shape
+
+         if channels < 1 or channels > 2:
+             raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}")
+
+         if padding_mask is None:
+             padding_mask = torch.ones_like(input_values).bool()
+
+         if use_streaming and padding_cache is None:
+             per_layer_padding, per_layer_padding_mode, per_layer_in_channels = [], [], []
+             for layer_name in self.encoder._mimiconv1d_layer_names:
+                 per_layer_padding.append(self.encoder.get_submodule(layer_name).padding_total)
+                 per_layer_padding_mode.append(self.encoder.get_submodule(layer_name).pad_mode)
+                 per_layer_in_channels.append(self.encoder.get_submodule(layer_name).in_channels)
+
+             # downsample layer
+             per_layer_padding.append(self.downsample.padding_total)
+             per_layer_padding_mode.append(self.downsample.pad_mode)
+             per_layer_in_channels.append(self.downsample.in_channels)
+
+             padding_cache = MimiConv1dPaddingCache(
+                 num_layers=len(self.encoder._mimiconv1d_layer_names) + 1,
+                 per_layer_padding=per_layer_padding,
+                 per_layer_padding_mode=per_layer_padding_mode,
+                 per_layer_in_channels=per_layer_in_channels,
+             )
+
+         encoded_frames, encoder_past_key_values, padding_cache = self._encode_frame(
+             input_values,
+             num_quantizers,
+             padding_mask.bool(),
+             past_key_values=encoder_past_key_values,
+             padding_cache=padding_cache,
+             return_dict=return_dict,
+         )
+
+         if not return_dict:
+             return (
+                 encoded_frames,
+                 encoder_past_key_values,
+                 padding_cache,
+             )
+
+         return MimiEncoderOutput(encoded_frames, encoder_past_key_values, padding_cache)
+
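A hedged usage sketch of the streaming path (assuming a `model` built from this module and fixed-size audio `chunks`, neither of which is defined here): with `return_dict=False`, `encode` returns `(audio_codes, encoder_past_key_values, padding_cache)`, and feeding both caches back in carries convolution padding and attention state across chunks:

```python
past_kv, pad_cache = None, None
for chunk in chunks:                       # each chunk: (batch, channels, samples)
    codes, past_kv, pad_cache = model.encode(
        chunk,
        encoder_past_key_values=past_kv,
        padding_cache=pad_cache,
        use_streaming=True,
        return_dict=False,
    )
```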
1626
+     def _decode_frame(
+         self,
+         codes: torch.Tensor,
+         past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
+         return_dict: Optional[bool] = None,
+     ) -> tuple[torch.Tensor, Optional[Union[Cache, list[torch.FloatTensor]]]]:
+         embeddings = self.quantizer.decode(codes)
+
+         embeddings = self.upsample(embeddings)
+         decoder_outputs = self.decoder_transformer(
+             embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict
+         )
+         if return_dict:
+             past_key_values = decoder_outputs.get("past_key_values")
+         elif len(decoder_outputs) > 1:
+             past_key_values = decoder_outputs[1]
+         embeddings = decoder_outputs[0].transpose(1, 2)
+         outputs = self.decoder(embeddings)
+         return outputs, past_key_values
+
1646
+     def decode(
+         self,
+         audio_codes: torch.Tensor,
+         padding_mask: Optional[torch.Tensor] = None,
+         decoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[tuple[torch.Tensor, torch.Tensor], MimiDecoderOutput]:
+         """
+         Decodes the given frames into an output audio waveform.
+
+         Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be
+         trimmed.
+
+         Args:
+             audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`):
+                 Discrete code embeddings computed using `model.encode`.
+             padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
+                 Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
+                 for *masked*.
+             decoder_past_key_values (`Cache`, *optional*):
+                 Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
+                 This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+                 The model will output the same cache format as the one fed as input.
+
+                 If `past_key_values` is used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
+                 have their past key value states given to this model).
+             return_dict (`bool`, *optional*):
+                 Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+         """
+         return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+         audio_values, decoder_past_key_values = self._decode_frame(
+             audio_codes, past_key_values=decoder_past_key_values, return_dict=return_dict
+         )
+
+         # truncate based on padding mask
+         if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]:
+             audio_values = audio_values[..., : padding_mask.shape[-1]]
+
+         if not return_dict:
+             return (
+                 audio_values,
+                 decoder_past_key_values,
+             )
+         return MimiDecoderOutput(audio_values, decoder_past_key_values)
+
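The decoder side mirrors the streaming encode sketch above: successive `decode` calls can share the decoder transformer's KV cache (again a hedged sketch, assuming `model` and per-chunk `codes_chunks` from earlier `encode` calls):

```python
dec_kv, audio_chunks = None, []
for codes in codes_chunks:                 # each: (batch, num_quantizers, frames)
    audio, dec_kv = model.decode(codes, decoder_past_key_values=dec_kv, return_dict=False)
    audio_chunks.append(audio)
```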
1694
+     @auto_docstring
+     def forward(
+         self,
+         input_values: torch.Tensor,
+         padding_mask: Optional[torch.Tensor] = None,
+         num_quantizers: Optional[int] = None,
+         audio_codes: Optional[torch.Tensor] = None,
+         encoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
+         decoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[tuple[torch.Tensor, torch.Tensor], MimiOutput]:
+         r"""
+         input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`):
+             Raw audio input converted to float.
+         padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
+             for *masked*.
+         num_quantizers (`int`, *optional*):
+             Number of quantizers (i.e. codebooks) to use. By default, all quantizers are used.
+         audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
+             Discrete code embeddings computed using `model.encode`.
+         encoder_past_key_values (`Cache`, *optional*):
+             Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
+             This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+             The model will output the same cache format as the one fed as input.
+
+             If `past_key_values` is used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
+             have their past key value states given to this model).
+         decoder_past_key_values (`Cache`, *optional*):
+             Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
+             This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+             The model will output the same cache format as the one fed as input.
+
+             If `past_key_values` is used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
+             have their past key value states given to this model).
+
+         Examples:
+
+         ```python
+         >>> from datasets import load_dataset
+         >>> from transformers import AutoFeatureExtractor, MimiModel
+
+         >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
+         >>> audio_sample = dataset["train"]["audio"][0]["array"]
+
+         >>> model_id = "kyutai/mimi"
+         >>> model = MimiModel.from_pretrained(model_id)
+         >>> feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
+
+         >>> inputs = feature_extractor(raw_audio=audio_sample, return_tensors="pt")
+
+         >>> outputs = model(**inputs)
+         >>> audio_codes = outputs.audio_codes
+         >>> audio_values = outputs.audio_values
+         ```"""
1751
+         return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+         if padding_mask is None:
+             padding_mask = torch.ones_like(input_values).bool()
+
+         if audio_codes is None:
+             encoder_outputs = self.encode(
+                 input_values, padding_mask, num_quantizers, encoder_past_key_values, return_dict=return_dict
+             )
+             audio_codes = encoder_outputs[0]
+             if return_dict:
+                 encoder_past_key_values = encoder_outputs.get("past_key_values")
+             elif len(encoder_outputs) > 1:
+                 encoder_past_key_values = encoder_outputs[1]
+
+         decoder_outputs = self.decode(audio_codes, padding_mask, decoder_past_key_values, return_dict=return_dict)
+         audio_values = decoder_outputs[0]
+         if return_dict:
+             decoder_past_key_values = decoder_outputs.get("past_key_values")
+         elif len(decoder_outputs) > 1:
+             decoder_past_key_values = decoder_outputs[1]
+
+         if not return_dict:
+             return (audio_codes, audio_values, encoder_past_key_values, decoder_past_key_values)
+
+         return MimiOutput(
+             audio_codes=audio_codes,
+             audio_values=audio_values,
+             encoder_past_key_values=encoder_past_key_values,
+             decoder_past_key_values=decoder_past_key_values,
+         )
+
+
+ __all__ = ["MimiModel", "MimiPreTrainedModel"]