class GPTNeoXForQuestionAnswering(GPTNeoXPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.gpt_neox = GPTNeoXModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
# Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.gpt_neox(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, squeeze the extra dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1).to(start_logits.device)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1).to(end_logits.device)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
        )
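A minimal usage sketch for the question-answering head above. The checkpoint name is illustrative (any GPT-NeoX-family checkpoint works); unless a QA-fine-tuned checkpoint is loaded, `qa_outputs` is freshly initialized and the predicted span is meaningless.

```python
import torch
from transformers import AutoTokenizer, GPTNeoXForQuestionAnswering

# Illustrative checkpoint; swap in a QA-fine-tuned GPT-NeoX checkpoint for real use.
model_id = "EleutherAI/pythia-70m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = GPTNeoXForQuestionAnswering.from_pretrained(model_id)

question, context = "Who wrote the report?", "The report was written by Jane Doe in 2021."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# The highest-scoring start/end positions delimit the predicted answer span.
start = outputs.start_logits.argmax()
end = outputs.end_logits.argmax()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```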
class PoolFormerConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of [`PoolFormerModel`]. It is used to instantiate a
PoolFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the PoolFormer
[sail/poolformer_s12](https://huggingface.co/sail/poolformer_s12) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of channels in the input image.
patch_size (`int`, *optional*, defaults to 16):
The size of the input patch.
stride (`int`, *optional*, defaults to 16):
The stride of the input patch.
pool_size (`int`, *optional*, defaults to 3):
The size of the pooling window.
mlp_ratio (`float`, *optional*, defaults to 4.0):
The ratio of the number of channels in the output of the MLP to the number of channels in the input.
depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`):
The depth of each encoder block.
hidden_sizes (`list`, *optional*, defaults to `[64, 128, 320, 512]`):
The hidden sizes of each encoder block.
patch_sizes (`list`, *optional*, defaults to `[7, 3, 3, 3]`):
            The size of the input patch for each encoder block.
strides (`list`, *optional*, defaults to `[4, 2, 2, 2]`):
The stride of the input patch for each encoder block.
padding (`list`, *optional*, defaults to `[2, 1, 1, 1]`):
The padding of the input patch for each encoder block.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks.
drop_path_rate (`float`, *optional*, defaults to 0.0):
            The stochastic depth (drop path) rate applied in the encoder blocks.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function for the hidden layers.
use_layer_scale (`bool`, *optional*, defaults to `True`):
Whether to use layer scale.
layer_scale_init_value (`float`, *optional*, defaults to 1e-05):
The initial value for the layer scale.
initializer_range (`float`, *optional*, defaults to 0.02):
            The initializer range for the weights.
Example:
```python
>>> from transformers import PoolFormerConfig, PoolFormerModel
>>> # Initializing a PoolFormer sail/poolformer_s12 style configuration
>>> configuration = PoolFormerConfig()
>>> # Initializing a model (with random weights) from the sail/poolformer_s12 style configuration
>>> model = PoolFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
    model_type = "poolformer"
def __init__(
self,
num_channels=3,
patch_size=16,
stride=16,
pool_size=3,
mlp_ratio=4.0,
depths=[2, 2, 6, 2],
hidden_sizes=[64, 128, 320, 512],
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
padding=[2, 1, 1, 1],
num_encoder_blocks=4,
drop_path_rate=0.0,
hidden_act="gelu",
use_layer_scale=True,
layer_scale_init_value=1e-5,
initializer_range=0.02,
**kwargs,
):
self.num_channels = num_channels
self.patch_size = patch_size
self.stride = stride
self.padding = padding
self.pool_size = pool_size
self.hidden_sizes = hidden_sizes
self.mlp_ratio = mlp_ratio
self.depths = depths
self.patch_sizes = patch_sizes
self.strides = strides
self.num_encoder_blocks = num_encoder_blocks
self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
self.use_layer_scale = use_layer_scale
self.layer_scale_init_value = layer_scale_init_value
self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse("1.11")
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
]
)
@property
def atol_for_validation(self) -> float:
        return 2e-3
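A sketch of how this ONNX config might be wired into the legacy `transformers.onnx` export API. The `PoolFormerOnnxConfig` import path and the opset value are assumptions; newer Transformers versions route ONNX export through `optimum` instead.

```python
from pathlib import Path

from transformers import AutoImageProcessor, PoolFormerModel
from transformers.models.poolformer.configuration_poolformer import PoolFormerOnnxConfig
from transformers.onnx import export  # legacy export entry point

model = PoolFormerModel.from_pretrained("sail/poolformer_s12")
processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
onnx_config = PoolFormerOnnxConfig(model.config)

# batch/num_channels/height/width are exported as dynamic axes, per `inputs` above.
onnx_inputs, onnx_outputs = export(processor, model, onnx_config, opset=13, output=Path("poolformer.onnx"))
```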
class PoolFormerDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
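`PoolFormerDropPath` delegates to a module-level `drop_path` helper that this excerpt does not include; below is a sketch of the standard stochastic-depth implementation it relies on (per-sample Bernoulli masking with rescaling).

```python
import torch


def drop_path(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Zero out entire samples with probability `drop_prob` and rescale the survivors."""
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over channel and spatial dimensions.
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # binarize: 1 with probability keep_prob, else 0
    return hidden_states.div(keep_prob) * random_tensor
```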
class PoolFormerEmbeddings(nn.Module):
"""
Construct Patch Embeddings.
"""
def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
super().__init__()
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()
def forward(self, pixel_values):
embeddings = self.projection(pixel_values)
embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group. Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
def __init__(self, pool_size):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
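The `- hidden_states` term looks odd in isolation; it is there because `PoolFormerLayer` adds the input back through a residual connection, so the branch plus the residual reduces to plain average pooling. A quick check of that identity:

```python
import torch
import torch.nn as nn

pool_size = 3
pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
hidden_states = torch.randn(1, 4, 8, 8)

# (pool(x) - x) + x == pool(x): the token-mixing branch composed with the residual is just pooling.
mixed = (pool(hidden_states) - hidden_states) + hidden_states
assert torch.allclose(mixed, pool(hidden_states))
```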
class PoolFormerOutput(nn.Module):
def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
super().__init__()
self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
self.drop = PoolFormerDropPath(dropout_prob)
if isinstance(config.hidden_act, str):
self.act_fn = ACT2FN[config.hidden_act]
else:
self.act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.conv1(hidden_states)
hidden_states = self.act_fn(hidden_states)
hidden_states = self.drop(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
"""This corresponds to the 'PoolFormerBlock' class in the original implementation."""
def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
super().__init__()
self.pooling = PoolFormerPooling(pool_size)
self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)
        # Stochastic depth (drop path), a regularizer active only during training
self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = config.use_layer_scale
if config.use_layer_scale:
self.layer_scale_1 = nn.Parameter(
config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
)
self.layer_scale_2 = nn.Parameter(
config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
)
def forward(self, hidden_states):
if self.use_layer_scale:
pooling_output = self.pooling(self.before_norm(hidden_states))
scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
# First residual connection
hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()
layer_output = self.output(self.after_norm(hidden_states))
scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
# Second residual connection
output = hidden_states + self.drop_path(scaled_op)
outputs = (output,) + outputs
return outputs
else:
pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
# First residual connection
hidden_states = pooling_output + hidden_states
outputs = ()
# Second residual connection inside the PoolFormerOutput block
layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
output = hidden_states + layer_output
outputs = (output,) + outputs
            return outputs
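`layer_scale_1` and `layer_scale_2` are per-channel scalars, and the two `unsqueeze(-1)` calls make them broadcast across the spatial map. A shape check with illustrative sizes:

```python
import torch

num_channels = 64
layer_scale = 1e-5 * torch.ones(num_channels)    # one learnable scalar per channel
features = torch.randn(2, num_channels, 56, 56)  # (batch, channels, height, width)

# (64,) -> (64, 1, 1), which broadcasts over height and width.
scaled = layer_scale.unsqueeze(-1).unsqueeze(-1) * features
print(scaled.shape)  # torch.Size([2, 64, 56, 56])
```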
class PoolFormerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
# patch embeddings
embeddings = []
for i in range(config.num_encoder_blocks):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i],
stride=config.strides[i],
padding=config.padding[i],
num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
hidden_size=config.hidden_sizes[i],
)
)
        self.patch_embeddings = nn.ModuleList(embeddings)
# Transformer blocks
blocks = []
cur = 0
for i in range(config.num_encoder_blocks):
# each block consists of layers
layers = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(
PoolFormerLayer(
config,
num_channels=config.hidden_sizes[i],
pool_size=config.pool_size,
hidden_size=config.hidden_sizes[i],
intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
drop_path=dpr[cur + j],
)
)
blocks.append(nn.ModuleList(layers))
self.block = nn.ModuleList(blocks)
def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
hidden_states = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
embedding_layer, block_layer = layers
# Get patch embeddings from hidden_states
hidden_states = embedding_layer(hidden_states)
# Send the embeddings through the blocks
for _, blk in enumerate(block_layer):
layer_outputs = blk(hidden_states)
hidden_states = layer_outputs[0]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = PoolFormerConfig
base_model_prefix = "poolformer"
main_input_name = "pixel_values"
_no_split_modules = ["PoolFormerLayer"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
            module.weight.data.fill_(1.0)
class PoolFormerModel(PoolFormerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.encoder = PoolFormerEncoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
        return self.encoder.patch_embeddings
@add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithNoAttention,
config_class=_CONFIG_FOR_DOC,
modality="vision",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
encoder_outputs = self.encoder(
pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
        )
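A usage sketch for the bare model. With the default strides `[4, 2, 2, 2]`, the cumulative downsampling factor is 32, so the processor's 224x224 crop yields a 7x7 feature map with `hidden_sizes[-1]` channels.

```python
import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerModel

processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerModel.from_pretrained("sail/poolformer_s12")

# A random stand-in image; any RGB PIL image works.
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# 224x224 input, cumulative stride 4*2*2*2 = 32 -> 7x7 map with hidden_sizes[-1] = 512 channels.
print(outputs.last_hidden_state.shape)  # torch.Size([1, 512, 7, 7])
```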
class PoolFormerFinalPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states):
output = self.dense(hidden_states)
        return output
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.poolformer = PoolFormerModel(config)
# Final norm
self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
# Classifier head
self.classifier = (
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT,
output_type=ImageClassifierOutputWithNoAttention,
config_class=_CONFIG_FOR_DOC,
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.poolformer(
pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
                    self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
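And a classification sketch: the final GroupNorm output is mean-pooled over the spatial dimensions before the linear head, so no [CLS] token or pooler is involved.

```python
import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_labels); ImageNet-1k labels for this checkpoint

print(model.config.id2label[logits.argmax(-1).item()])
```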
class PoolFormerImageProcessor(BaseImageProcessor):
r"""
Constructs a PoolFormer image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. If crop_pct is
unset:
- size is `{"height": h, "width": w}`: the image is resized to `(h, w)`.
- size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the
              aspect ratio.
If crop_pct is set:
- size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)),
int(floor(w/crop_pct)))`
            - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))`
whilst maintaining the aspect ratio.
            - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))`
whilst maintaining the aspect ratio.
crop_pct (`float`, *optional*, defaults to 0.9):
Percentage of the image to crop from the center. Can be overridden by `crop_pct` in the `preprocess`
method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`): | 3,674 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/image_processing_poolformer.py |
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in the `preprocess`
method.
crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after applying center crop. Only has an effect if `do_center_crop` is set to `True`. Can
be overridden by the `crop_size` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
`preprocess` method.
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    """
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Dict[str, int] = None,
        crop_pct: float = 0.9,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_center_crop: bool = True,
crop_size: Dict[str, int] = None,
rescale_factor: Union[int, float] = 1 / 255,
do_rescale: bool = True,
do_normalize: bool = True,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 224}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.crop_pct = crop_pct
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def resize(
self,
image: np.ndarray,
size: Dict[str, int],
crop_pct: Optional[float] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
        Resize an image.
If crop_pct is unset:
- size is `{"height": h, "width": w}`: the image is resized to `(h, w)`.
- size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the
aspect ratio.
        If crop_pct is set:
        - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)),
          int(floor(w/crop_pct)))`
        - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))`
          whilst maintaining the aspect ratio.
        - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))`
          whilst maintaining the aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
crop_pct (`float`, *optional*):
                Percentage of the image that will be cropped from the center. If set, the image is resized as
                described above.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
if crop_pct is not None:
if "shortest_edge" in size:
scale_size = int(size["shortest_edge"] / crop_pct)
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
scale_size = int(size["height"] / crop_pct)
else:
scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
else:
                raise ValueError("Invalid size for resize: {}".format(size))
output_size = get_resize_output_image_size(
image, size=scale_size, default_to_square=False, input_data_format=input_data_format
)
else:
if "shortest_edge" in size:
output_size = get_resize_output_image_size(
image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
)
elif "height" in size and "width" in size:
output_size = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(size))
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
        )
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: bool = None,
size: Dict[str, int] = None,
        crop_pct: float = None,
resample: PILImageResampling = None,
do_center_crop: bool = None,
crop_size: Dict[str, int] = None,
do_rescale: bool = None,
rescale_factor: float = None,
do_normalize: bool = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> BatchFeature:
"""
        Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after applying resize.
crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
Percentage of the image to crop. Only has an effect if `do_resize` is set to `True`.
resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after applying center crop.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
            return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
crop_pct = crop_pct if crop_pct is not None else self.crop_pct
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
        )
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [
self.resize(
image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
)
for image in images
            ]
if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
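A sketch of the default pipeline end to end: with `size={"shortest_edge": 224}` and `crop_pct=0.9`, the shortest edge is first resized to `int(224 / 0.9) = 248`, then a 224x224 center crop, rescale, and normalization are applied.

```python
import numpy as np
from PIL import Image
from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor()  # defaults: shortest_edge=224, crop_pct=0.9, 224x224 crop
image = Image.fromarray(np.random.randint(0, 256, (300, 500, 3), dtype=np.uint8))

batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)
```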
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead.",
FutureWarning,
)
        super().__init__(*args, **kwargs)
class MimiConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of an [`MimiModel`]. It is used to instantiate a
Mimi model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the
[kyutai/mimi](https://huggingface.co/kyutai/mimi) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
Args:
sampling_rate (`int`, *optional*, defaults to 24000):
            The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
frame_rate (`float`, *optional*, defaults to 12.5):
Framerate of the model.
audio_channels (`int`, *optional*, defaults to 1):
Number of channels in the audio data. Either 1 for mono or 2 for stereo.
hidden_size (`int`, *optional*, defaults to 512):
Intermediate representation dimension.
num_filters (`int`, *optional*, defaults to 64):
            Number of convolution kernels of the first `MimiConv1d` downsampling layer.
num_residual_layers (`int`, *optional*, defaults to 1):
Number of residual layers.
upsampling_ratios (`Sequence[int]`, *optional*):
            Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it
will use the ratios in the reverse order to the ones specified here that must match the decoder order.
            If not specified, will default to `[8, 6, 5, 4]`.
kernel_size (`int`, *optional*, defaults to 7):
Kernel size for the initial convolution.
last_kernel_size (`int`, *optional*, defaults to 3):
Kernel size for the last convolution layer.
residual_kernel_size (`int`, *optional*, defaults to 3):
Kernel size for the residual layers.
dilation_growth_rate (`int`, *optional*, defaults to 2):
How much to increase the dilation with each layer.
use_causal_conv (`bool`, *optional*, defaults to `True`):
Whether to use fully causal convolution.
pad_mode (`str`, *optional*, defaults to `"constant"`):
Padding mode for the convolutions.
compress (`int`, *optional*, defaults to 2):
            Reduced dimensionality in residual branches.
trim_right_ratio (`float`, *optional*, defaults to 1.0):
Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If
equal to 1.0, it means that all the trimming is done at the right.
codebook_size (`int`, *optional*, defaults to 2048):
            Number of discrete codes in each codebook.
codebook_dim (`int`, *optional*, defaults to 256):
Dimension of the unquantized codebook vectors. If not defined, uses `hidden_size`.
num_quantizers (`int`, *optional*, defaults to 32):
Number of quantizer channels, or codebooks, in the quantizer.
use_conv_shortcut (`bool`, *optional*, defaults to `False`):
Whether to use a convolutional layer as the 'skip' connection in the `MimiResnetBlock` block. If False,
an identity function will be used, giving a generic residual connection.
vector_quantization_hidden_dimension (`int`, *optional*, defaults to 256): | 3,676 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/configuration_mimi.py |
Intermediate representation dimension in the residual vector quantization space.
num_semantic_quantizers (`int`, *optional*, defaults to 1):
            Number of semantic quantizer channels, or codebooks, in the semantic quantizer. Must be lower than `num_quantizers`.
upsample_groups (`int`, *optional*, defaults to 512):
If `frame_rate!=encodec_frame_rate`, indicates the number of groups used in the upsampling operation to go from one rate to another.
num_hidden_layers (`int`, *optional*, defaults to 8):
Number of hidden layers in the Transformer models.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimension of the MLP representations.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 8000):
The maximum sequence length that this model might ever be used with. Mimi's sliding window attention
            allows sequences of up to 8000 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the LayerNorm normalization layers.
use_cache (`bool`, *optional*, defaults to `False`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
sliding_window (`int`, *optional*, defaults to 250):
Sliding window attention window size. If not specified, will default to `250`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
layer_scale_initial_scale (`float`, *optional*, defaults to 0.01):
            Initial scale of the residual rescaling operation done in the Transformer models.
        attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
    Example:
```python
>>> from transformers import MimiModel, MimiConfig
>>> # Initializing a "kyutai/mimi" style configuration
>>> configuration = MimiConfig()
>>> # Initializing a model (with random weights) from the "kyutai/mimi" style configuration
>>> model = MimiModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
    model_type = "mimi"
def __init__(
self,
sampling_rate=24_000,
frame_rate=12.5,
audio_channels=1,
hidden_size=512,
num_filters=64,
num_residual_layers=1,
upsampling_ratios=None,
kernel_size=7,
last_kernel_size=3,
residual_kernel_size=3,
dilation_growth_rate=2,
use_causal_conv=True,
pad_mode="constant",
compress=2,
trim_right_ratio=1.0,
codebook_size=2048,
codebook_dim=256,
num_quantizers=32,
use_conv_shortcut=False,
vector_quantization_hidden_dimension=256,
num_semantic_quantizers=1,
upsample_groups=512,
num_hidden_layers=8,
intermediate_size=2048,
num_attention_heads=8,
num_key_value_heads=8,
head_dim=None,
hidden_act="gelu",
max_position_embeddings=8000,
initializer_range=0.02,
norm_eps=1e-5,
use_cache=False,
        rope_theta=10000.0,
sliding_window=250,
attention_dropout=0.0,
layer_scale_initial_scale=0.01,
attention_bias=False,
**kwargs,
):
self.sampling_rate = sampling_rate
self.frame_rate = frame_rate
self.audio_channels = audio_channels
self.hidden_size = hidden_size
self.num_filters = num_filters
self.num_residual_layers = num_residual_layers
self.upsampling_ratios = upsampling_ratios if upsampling_ratios else [8, 6, 5, 4]
self.kernel_size = kernel_size
self.last_kernel_size = last_kernel_size
self.residual_kernel_size = residual_kernel_size
self.dilation_growth_rate = dilation_growth_rate
self.use_causal_conv = use_causal_conv
self.pad_mode = pad_mode
self.compress = compress
self.trim_right_ratio = trim_right_ratio
self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
self.num_quantizers = num_quantizers
self.use_conv_shortcut = use_conv_shortcut
self.vector_quantization_hidden_dimension = vector_quantization_hidden_dimension
self.upsample_groups = upsample_groups
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.norm_eps = norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.sliding_window = sliding_window
self.attention_dropout = attention_dropout
self.head_dim = head_dim or hidden_size // num_attention_heads
self.layer_scale_initial_scale = layer_scale_initial_scale
        self.attention_bias = attention_bias
if num_semantic_quantizers >= self.num_quantizers:
raise ValueError(
f"The number of semantic quantizers should be lower than the total number of quantizers {self.num_quantizers}, but is currently {num_semantic_quantizers}."
)
self.num_semantic_quantizers = num_semantic_quantizers
super().__init__(**kwargs)
@property
def encodec_frame_rate(self) -> int:
hop_length = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def num_codebooks(self) -> int:
# alias to num_quantizers
        return self.num_quantizers
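A worked example of the two frame rates with the default configuration; the gap between the convolutional (EnCodec-style) rate and the model's 12.5 Hz `frame_rate` is what the `upsample_groups` stage bridges.

```python
import numpy as np
from transformers import MimiConfig

config = MimiConfig()  # defaults: sampling_rate=24000, upsampling_ratios=[8, 6, 5, 4]

hop_length = np.prod(config.upsampling_ratios)  # 8 * 6 * 5 * 4 = 960 samples per frame
print(hop_length)                  # 960
print(config.encodec_frame_rate)   # ceil(24000 / 960) = 25 (Hz)
print(config.frame_rate)           # 12.5 (Hz)
```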
class MimiOutput(ModelOutput):
"""
Args:
audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
            Discrete code embeddings computed using `model.encode`.
        audio_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Decoded audio values, obtained using the decoder part of Mimi.
encoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
            This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
            The model will output the same cache format that is fed as input.
            If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
            have their past key value states given to this model).
decoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
            This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
            The model will output the same cache format that is fed as input.
            If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
            have their past key value states given to this model).
    """
audio_codes: torch.LongTensor = None
audio_values: torch.FloatTensor = None
encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None
    decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None
class MimiEncoderOutput(ModelOutput):
"""
Args:
audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
            Discrete code embeddings computed using `model.encode`.
encoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
            This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
            The model will output the same cache format that is fed as input.
            If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
            have their past key value states given to this model).
    """
audio_codes: torch.LongTensor = None
    encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None
class MimiDecoderOutput(ModelOutput):
"""
Args:
audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
Decoded audio values, obtained using the decoder part of Mimi.
decoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
            This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
            The model will output the same cache format that is fed as input.
            If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
            have their past key value states given to this model).
    """
audio_values: torch.FloatTensor = None
    decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None
class MimiConv1d(nn.Module):
"""Conv1d with asymmetric or causal padding and normalization."""
def __init__(
self,
config,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
dilation: int = 1,
groups: int = 1,
pad_mode=None,
bias: bool = True,
):
super().__init__()
self.causal = config.use_causal_conv
self.pad_mode = config.pad_mode if pad_mode is None else pad_mode
# warn user on unusual setup between dilation and stride
if stride > 1 and dilation > 1:
logger.warning(
"MimiConv1d has been initialized with stride > 1 and dilation > 1"
f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})."
)
self.conv = nn.Conv1d(
in_channels, out_channels, kernel_size, stride, dilation=dilation, groups=groups, bias=bias
        )
kernel_size = self.conv.kernel_size[0]
stride = torch.tensor(self.conv.stride[0], dtype=torch.int64)
dilation = self.conv.dilation[0]
# Effective kernel size with dilations.
kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64)
self.register_buffer("stride", stride, persistent=False)
self.register_buffer("kernel_size", kernel_size, persistent=False)
self.register_buffer("padding_total", torch.tensor(kernel_size - stride, dtype=torch.int64), persistent=False)
# Asymmetric padding required for odd strides
self.padding_right = self.padding_total // 2
self.padding_left = self.padding_total - self.padding_right
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
        weight_norm(self.conv)
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.conv)
# Copied from transformers.models.encodec.modeling_encodec.EncodecConv1d._get_extra_padding_for_conv1d
def _get_extra_padding_for_conv1d(
self,
hidden_states: torch.Tensor,
) -> torch.Tensor:
"""See `pad_for_conv1d`."""
length = hidden_states.shape[-1]
n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1
n_frames = torch.ceil(n_frames).to(torch.int64) - 1
ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total
        return ideal_length - length
@staticmethod
# Copied from transformers.models.encodec.modeling_encodec.EncodecConv1d._pad1d
def _pad1d(hidden_states: torch.Tensor, paddings: Tuple[int, int], mode: str = "zero", value: float = 0.0):
"""Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input.
If this is the case, we insert extra 0 padding to the right before the reflection happens.
"""
length = hidden_states.shape[-1]
padding_left, padding_right = paddings
if mode != "reflect":
return nn.functional.pad(hidden_states, paddings, mode, value)
max_pad = max(padding_left, padding_right)
extra_pad = 0
if length <= max_pad:
extra_pad = max_pad - length + 1
hidden_states = nn.functional.pad(hidden_states, (0, extra_pad))
padded = nn.functional.pad(hidden_states, paddings, mode, value)
end = padded.shape[-1] - extra_pad
return padded[..., :end] | 3,680 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
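# Editor's worked example (assumed helper, not in the original file): reflect
# padding in PyTorch requires the pad amount to be smaller than the input
# length, so `_pad1d` first right-pads a too-short input with zeros, reflects,
# then trims the temporary zeros back off.
def _example_reflect_pad_small_input():
    hidden_states = torch.arange(2.0).view(1, 1, 2)  # length 2, so a pad of 3 is "too large"
    # nn.functional.pad(hidden_states, (3, 3), mode="reflect") would raise a RuntimeError
    return MimiConv1d._pad1d(hidden_states, (3, 3), mode="reflect")  # shape (1, 1, 8)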
def forward(self, hidden_states):
extra_padding = self._get_extra_padding_for_conv1d(hidden_states)
if self.causal:
# Left padding for causal
hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode)
else:
hidden_states = self._pad1d(
hidden_states, (self.padding_left, self.padding_right + extra_padding), mode=self.pad_mode
)
hidden_states = self.conv(hidden_states)
return hidden_states | 3,680 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
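# Editor's worked example (assumed helper, not in the original file): how the
# padding buffers from `__init__` are applied in `forward`, ignoring the
# data-dependent extra padding. For kernel_size=7, stride=2, dilation=1:
# padding_total = 7 - 2 = 5; a causal conv puts all 5 samples on the left,
# while a non-causal conv splits them 3 left / 2 right.
def _example_conv_padding(kernel_size=7, stride=2, dilation=1, causal=True):
    effective_kernel = (kernel_size - 1) * dilation + 1  # 7
    padding_total = effective_kernel - stride            # 5
    if causal:
        return padding_total, 0                          # (left, right) = (5, 0)
    padding_right = padding_total // 2                   # 2
    return padding_total - padding_right, padding_right  # (left, right) = (3, 2)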
class MimiConvTranspose1d(nn.Module):
"""ConvTranspose1d with asymmetric or causal padding and normalization."""
def __init__(
self,
config,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
groups: int = 1,
bias=True,
):
super().__init__()
self.causal = config.use_causal_conv
self.trim_right_ratio = config.trim_right_ratio
self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride, groups=groups, bias=bias)
if not (self.causal or self.trim_right_ratio == 1.0):
raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions")
kernel_size = self.conv.kernel_size[0]
stride = self.conv.stride[0]
padding_total = kernel_size - stride | 3,681 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
# removed at the very end, when keeping only the right length for the output,
# as removing it here would require also passing the length at the matching layer
# in the encoder.
if self.causal:
# Trim the padding on the right according to the specified ratio
# if trim_right_ratio = 1.0, trim everything from right
self.padding_right = math.ceil(padding_total * self.trim_right_ratio)
else:
# Asymmetric padding required when the total padding is odd
self.padding_right = padding_total // 2
self.padding_left = padding_total - self.padding_right
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
weight_norm(self.conv) | 3,681 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.conv)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
# unpad
end = hidden_states.shape[-1] - self.padding_right
hidden_states = hidden_states[..., self.padding_left : end]
return hidden_states | 3,681 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
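# Editor's worked example (assumed helper, not in the original file): the
# trimming applied by MimiConvTranspose1d. For kernel_size=8, stride=4,
# padding_total = 4. A causal layer with trim_right_ratio=1.0 trims all 4
# samples from the right; trim_right_ratio=0.5 trims ceil(4 * 0.5) = 2 from
# each side; a non-causal layer splits the trim 2 left / 2 right.
def _example_transpose_trim(kernel_size=8, stride=4, causal=True, trim_right_ratio=1.0):
    padding_total = kernel_size - stride                  # 4
    if causal:
        padding_right = math.ceil(padding_total * trim_right_ratio)
    else:
        padding_right = padding_total // 2
    return padding_total - padding_right, padding_right  # (left, right)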
class MimiResnetBlock(nn.Module):
"""
Residual block from SEANet model as used by Mimi.
"""
def __init__(self, config: MimiConfig, dim: int, dilations: List[int]):
super().__init__()
kernel_sizes = (config.residual_kernel_size, 1)
if len(kernel_sizes) != len(dilations):
raise ValueError("Number of kernel sizes should match number of dilations")
hidden = dim // config.compress
block = []
for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
in_chs = dim if i == 0 else hidden
out_chs = dim if i == len(kernel_sizes) - 1 else hidden
block += [nn.ELU()]
block += [MimiConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)]
self.block = nn.ModuleList(block)
if config.use_conv_shortcut:
self.shortcut = MimiConv1d(config, dim, dim, kernel_size=1)
else:
self.shortcut = nn.Identity() | 3,682 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def forward(self, hidden_states):
residual = hidden_states
for layer in self.block:
hidden_states = layer(hidden_states)
return self.shortcut(residual) + hidden_states | 3,682 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
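# Editor's shape sketch (illustrative values, not from the config): with dim=512
# and config.compress=2, MimiResnetBlock squeezes channels 512 -> 256 -> 512:
#     ELU -> MimiConv1d(512, 256, residual_kernel_size, dilation=d)
#     ELU -> MimiConv1d(256, 512, kernel_size=1)
# and adds the (optionally 1x1-conv-projected) input back as a residual.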
class MimiEncoder(nn.Module):
"""SEANet encoder as used by Mimi."""
def __init__(self, config: MimiConfig):
super().__init__()
model = [MimiConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)]
scaling = 1
# Downsample to raw audio scale
for ratio in reversed(config.upsampling_ratios):
current_scale = scaling * config.num_filters
# Add residual layers
for j in range(config.num_residual_layers):
model += [MimiResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])]
# Add downsampling layers
model += [nn.ELU()]
model += [MimiConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)]
scaling *= 2
model += [nn.ELU()]
model += [MimiConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)]
self.layers = nn.ModuleList(model) | 3,683 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# Copied from transformers.models.encodec.modeling_encodec.EncodecEncoder.forward
def forward(self, hidden_states):
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states | 3,683 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
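# Editor's worked example (assumed helper; the ratios and sampling rate are
# taken from the released Mimi checkpoint's config, not from this file): the
# encoder's total downsampling factor is the product of the ratios, e.g.
# 8 * 6 * 5 * 4 = 960, so 24 kHz audio becomes 25 latent frames per second.
def _example_encoder_frame_rate(upsampling_ratios=(8, 6, 5, 4), sampling_rate=24_000):
    hop_length = math.prod(upsampling_ratios)  # 960
    return sampling_rate / hop_length          # 25.0 frames per second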
class MimiLayerScale(nn.Module):
"""Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
This rescales diagonally the residual outputs close to 0, with a learnt scale.
"""
def __init__(self, config):
super().__init__()
channels = config.hidden_size
initial_scale = config.layer_scale_initial_scale
self.scale = nn.Parameter(torch.full((channels,), initial_scale, requires_grad=True))
def forward(self, x: torch.Tensor):
return self.scale * x | 3,684 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
class MimiRotaryEmbedding(nn.Module):
def __init__(self, config: MimiConfig, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq | 3,685 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def _dynamic_frequency_update(self, position_ids, device):
"""
dynamic RoPE layers should recompute `inv_freq` in the following situations:
1 - growing beyond the cached sequence length (allow scaling)
2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
"""
seq_len = torch.max(position_ids) + 1
if seq_len > self.max_seq_len_cached: # growth
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
self.max_seq_len_cached = seq_len | 3,685 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
# This .to() is needed if the model has been moved to a device after being initialized (because
# the buffer is automatically moved, but not the original copy)
self.original_inv_freq = self.original_inv_freq.to(device)
self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
self.max_seq_len_cached = self.original_max_seq_len
@torch.no_grad()
def forward(self, x, position_ids):
if "dynamic" in self.rope_type:
self._dynamic_frequency_update(position_ids, device=x.device) | 3,685 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# Core RoPE block
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
# Force float32 (see https://github.com/huggingface/transformers/pull/29285)
device_type = x.device.type
device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos()
sin = emb.sin()
# Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
cos = cos * self.attention_scaling
sin = sin * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) | 3,685 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
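# Editor's reference sketch: `apply_rotary_pos_emb`, called by MimiAttention
# below, is defined elsewhere in the file. It is assumed to follow the standard
# Llama-style rotary helper, reproduced here (with `_example_` names) for
# readability.
def _example_rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def _example_apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies the cos/sin computed by MimiRotaryEmbedding to queries and keys."""
    cos = cos.unsqueeze(unsqueeze_dim)  # broadcast over the head dimension
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (_example_rotate_half(q) * sin)
    k_embed = (k * cos) + (_example_rotate_half(k) * sin)
    return q_embed, k_embed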
class MimiMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
# Copied from transformers.models.clip.modeling_clip.CLIPMLP.forward
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states | 3,686 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
class MimiAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: MimiConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
) | 3,687 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = config.head_dim
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
self.is_causal = True
self.scaling = 1 / math.sqrt(config.head_dim)
if self.hidden_size % self.num_heads != 0:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
) | 3,687 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
self.rotary_emb = MimiRotaryEmbedding(config)
self.sliding_window = config.sliding_window # Ignore copy | 3,687 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) | 3,687 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
cos, sin = self.rotary_emb(value_states, position_ids)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling
if attention_mask is not None: # no matter the length, we just slice it
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask | 3,687 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value | 3,687 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
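# Editor's reference sketch: `repeat_kv`, used in the forward above, is defined
# elsewhere in the file. It is assumed to follow the standard grouped-query
# attention helper, reproduced here for readability: key/value heads of shape
# (batch, num_key_value_heads, seq_len, head_dim) are expanded to
# (batch, num_attention_heads, seq_len, head_dim), copying data only at the
# final reshape.
def _example_repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(
        batch, num_key_value_heads, n_rep, slen, head_dim
    )
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)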
class MimiFlashAttention2(MimiAttention):
"""
Mimi flash attention module. This module inherits from `MimiAttention`, as the weights of the module stay
untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) | 3,688 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |
# TODO: Should be removed once Flash Attention for ROCm is bumped to 2.1.
# flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() | 3,688 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py |