potsawee committed
Commit deb8802 · verified · 1 Parent(s): 45b7e2b

Upload configuration_mimi.py with huggingface_hub

Files changed (1)
  1. configuration_mimi.py +279 -0
configuration_mimi.py ADDED
@@ -0,0 +1,279 @@
+ # coding=utf-8
+ # Copyright 2024 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Mimi model configuration"""
+ 
+ import math
+ 
+ import numpy as np
+ 
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+ 
+ 
+ logger = logging.get_logger(__name__)
+ 
+ 
+ class MimiConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`MimiModel`]. It is used to instantiate a
+     Mimi model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the
+     [kyutai/mimi](https://huggingface.co/kyutai/mimi) architecture.
+ 
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+ 
+     Args:
+         sampling_rate (`int`, *optional*, defaults to 24000):
+             The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
+         frame_rate (`float`, *optional*):
+             If not provided, computed from the other parameters; kept as an explicit argument for backward
+             compatibility.
+         audio_channels (`int`, *optional*, defaults to 1):
+             Number of channels in the audio data. Either 1 for mono or 2 for stereo.
+         hidden_size (`int`, *optional*, defaults to 512):
+             Intermediate representation dimension.
+         num_filters (`int`, *optional*, defaults to 64):
+             Number of convolution kernels in the first `MimiConv1d` downsampling layer.
+         num_residual_layers (`int`, *optional*, defaults to 1):
+             Number of residual layers.
+         upsampling_ratios (`Sequence[int]`, *optional*):
+             Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence
+             it will use the ratios in the reverse order of the ones specified here, which must match the decoder
+             order. If not specified, defaults to `[8, 6, 5, 4]`.
+         kernel_size (`int`, *optional*, defaults to 7):
+             Kernel size for the initial convolution.
+         last_kernel_size (`int`, *optional*, defaults to 3):
+             Kernel size for the last convolution layer.
+         residual_kernel_size (`int`, *optional*, defaults to 3):
+             Kernel size for the residual layers.
+         dilation_growth_rate (`int`, *optional*, defaults to 2):
+             How much to increase the dilation with each layer.
+         use_causal_conv (`bool`, *optional*, defaults to `True`):
+             Whether to use fully causal convolution.
+         pad_mode (`str`, *optional*, defaults to `"constant"`):
+             Padding mode for the convolutions.
+         compress (`int`, *optional*, defaults to 2):
+             Reduced dimensionality in residual branches.
+         trim_right_ratio (`float`, *optional*, defaults to 1.0):
+             Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup.
+             If equal to 1.0, it means that all the trimming is done at the right.
+         codebook_size (`int`, *optional*, defaults to 2048):
+             Number of discrete codes in each codebook.
+         codebook_dim (`int`, *optional*, defaults to 256):
+             Dimension of the unquantized codebook vectors. If not defined, uses `hidden_size`.
+         num_quantizers (`int`, *optional*, defaults to 32):
+             Number of quantizer channels, or codebooks, in the quantizer.
+         use_conv_shortcut (`bool`, *optional*, defaults to `False`):
+             Whether to use a convolutional layer as the 'skip' connection in the `MimiResnetBlock` block. If `False`,
+             an identity function will be used, giving a generic residual connection.
+         vector_quantization_hidden_dimension (`int`, *optional*, defaults to 256):
+             Intermediate representation dimension in the residual vector quantization space.
+         num_semantic_quantizers (`int`, *optional*, defaults to 1):
+             Number of semantic quantizer channels, or codebooks, in the semantic quantizer. Must be lower than
+             `num_quantizers`.
+         upsample_groups (`int`, *optional*, defaults to 512):
+             If `frame_rate != encodec_frame_rate`, indicates the number of groups used in the upsampling operation
+             to go from one rate to the other.
+         num_hidden_layers (`int`, *optional*, defaults to 8):
+             Number of hidden layers in the Transformer models.
+         intermediate_size (`int`, *optional*, defaults to 2048):
+             Dimension of the MLP representations.
+         num_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 8):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
+         head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
+             The attention head dimension.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 8000):
+             The maximum sequence length that this model might ever be used with. Mimi's sliding window attention
+             allows sequences of up to 8000 tokens.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the LayerNorm normalization layers.
+         use_cache (`bool`, *optional*, defaults to `False`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         use_streaming (`bool`, *optional*, defaults to `False`):
+             Whether to use streaming mode. If `True`, the model's encode method will return the padding cache,
+             which can be passed to a subsequent call to the encode method.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         sliding_window (`int`, *optional*, defaults to 250):
+             Sliding window attention window size. If not specified, will default to `250`.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         layer_scale_initial_scale (`float`, *optional*, defaults to 0.01):
+             Initial scale of the residual rescaling operation done in the Transformer models.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+ 
+     Example:
+ 
+     ```python
+     >>> from transformers import MimiModel, MimiConfig
+ 
+     >>> # Initializing a "kyutai/mimi" style configuration
+     >>> configuration = MimiConfig()
+ 
+     >>> # Initializing a model (with random weights) from the "kyutai/mimi" style configuration
+     >>> model = MimiModel(configuration)
+ 
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+ 
+     model_type = "mimi"
+ 
+     def __init__(
+         self,
+         sampling_rate=24_000,
+         frame_rate=None,
+         audio_channels=1,
+         hidden_size=512,
+         num_filters=64,
+         num_residual_layers=1,
+         upsampling_ratios=None,
+         kernel_size=7,
+         last_kernel_size=3,
+         residual_kernel_size=3,
+         dilation_growth_rate=2,
+         use_causal_conv=True,
+         pad_mode="constant",
+         compress=2,
+         trim_right_ratio=1.0,
+         codebook_size=2048,
+         codebook_dim=256,
+         num_quantizers=32,
+         use_conv_shortcut=False,
+         vector_quantization_hidden_dimension=256,
+         num_semantic_quantizers=1,
+         upsample_groups=512,
+         num_hidden_layers=8,
+         intermediate_size=2048,
+         num_attention_heads=8,
+         num_key_value_heads=8,
+         head_dim=None,
+         hidden_act="gelu",
+         max_position_embeddings=8000,
+         initializer_range=0.02,
+         norm_eps=1e-5,
+         use_cache=False,
+         use_streaming=False,
+         rope_theta=10000.0,
+         sliding_window=250,
+         attention_dropout=0.0,
+         layer_scale_initial_scale=0.01,
+         attention_bias=False,
+         **kwargs,
+     ):
+         self.sampling_rate = sampling_rate
+         self.audio_channels = audio_channels
+         self.hidden_size = hidden_size
+         self.num_filters = num_filters
+         self.num_residual_layers = num_residual_layers
+         self.upsampling_ratios = upsampling_ratios if upsampling_ratios else [8, 6, 5, 4]
+         self.kernel_size = kernel_size
+         self.last_kernel_size = last_kernel_size
+         self.residual_kernel_size = residual_kernel_size
+         self.dilation_growth_rate = dilation_growth_rate
+         self.use_causal_conv = use_causal_conv
+         self.pad_mode = pad_mode
+         self.compress = compress
+         self.trim_right_ratio = trim_right_ratio
+         self.codebook_size = codebook_size
+         self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
+         self.num_quantizers = num_quantizers
+         self.use_conv_shortcut = use_conv_shortcut
+         self.vector_quantization_hidden_dimension = vector_quantization_hidden_dimension
+         self.upsample_groups = upsample_groups
+         self.num_hidden_layers = num_hidden_layers
+         self.intermediate_size = intermediate_size
+         self.num_attention_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.max_position_embeddings = max_position_embeddings
+         self.initializer_range = initializer_range
+         self.norm_eps = norm_eps
+         self.use_cache = use_cache
+         self.use_streaming = use_streaming
+         self.rope_theta = rope_theta
+         self.sliding_window = sliding_window
+         self.attention_dropout = attention_dropout
+         self.head_dim = head_dim or hidden_size // num_attention_heads
+         self.layer_scale_initial_scale = layer_scale_initial_scale
+         self.attention_bias = attention_bias
+ 
+         # Handle backward compatibility for frame_rate:
+         # if frame_rate is explicitly provided, store and use it as-is;
+         # otherwise it is computed from the other parameters (see the `frame_rate` property).
+         if frame_rate is not None:
+             self._frame_rate = frame_rate
+         else:
+             self._frame_rate = None
+ 
+         if num_semantic_quantizers >= self.num_quantizers:
+             raise ValueError(
+                 f"The number of semantic quantizers should be lower than the total number of quantizers {self.num_quantizers}, but is currently {num_semantic_quantizers}."
+             )
+         self.num_semantic_quantizers = num_semantic_quantizers
+         super().__init__(**kwargs)
+ 
+     @property
+     def encodec_frame_rate(self) -> int:
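+         # Worked example (added for clarity): with the default upsampling_ratios [8, 6, 5, 4]
+         # and sampling_rate 24000, hop_length = 8 * 6 * 5 * 4 = 960, so this returns
+         # ceil(24000 / 960) = 25, the frame rate before the final stride-2 downsampling layer.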
+         hop_length = np.prod(self.upsampling_ratios)
+         return math.ceil(self.sampling_rate / hop_length)
+ 
+     @property
+     def num_codebooks(self) -> int:
+         # alias for num_quantizers
+         return self.num_quantizers
+ 
+     @property
+     def frame_size(self) -> int:
+         # 1. collect the stride of every encoder conv
+         # first conv
+         strides = [1]
+ 
+         # layer convs
+         for ratio in reversed(self.upsampling_ratios):
+             for _ in range(self.num_residual_layers):
+                 len_kernel_sizes = len(self.residual_kernel_size) if isinstance(self.residual_kernel_size, list) else 1
+                 strides.extend([1] * (len_kernel_sizes + 1))
+                 if self.use_conv_shortcut:  # skip connection
+                     strides.append(1)
+ 
+             strides.append(ratio)
+ 
+         # last conv
+         strides.append(1)
+ 
+         # downsampling layer
+         strides.append(2)
+ 
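+         # 2. the frame size is the product of all strides (comment added for clarity): with the
+         # defaults every residual and shortcut conv has stride 1, so this is
+         # 8 * 6 * 5 * 4 * 2 = 1920 samples per frame.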
+         return math.prod(strides)
+ 
+     @property
+     def frame_rate(self) -> float:
+         # handle backward compatibility: an explicitly provided frame_rate takes precedence
+         if self._frame_rate is not None:
+             return self._frame_rate
+         return self.sampling_rate / self.frame_size
+ 
+ 
+ __all__ = ["MimiConfig"]
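
As a quick sanity check of the derived properties above, here is a minimal usage sketch. It assumes `transformers` is installed and that this file is importable as `configuration_mimi`; the variable names are illustrative only:

```python
from configuration_mimi import MimiConfig

config = MimiConfig()  # kyutai/mimi-style defaults

print(config.frame_size)          # 1920 (= 8 * 6 * 5 * 4 * 2 samples per frame)
print(config.frame_rate)          # 12.5 (= 24000 / 1920 Hz, computed since no frame_rate was passed)
print(config.encodec_frame_rate)  # 25 (= ceil(24000 / 960))
print(config.num_codebooks)       # 32 (alias for num_quantizers)

# Passing frame_rate explicitly overrides the computed value (backward compatibility).
legacy = MimiConfig(frame_rate=12.5)
print(legacy.frame_rate)          # 12.5, returned from the stored value
```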